code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import csv
from django.contrib import admin
from vaas.manager.models import Director, Backend, Probe, TimeProfile
from vaas.manager.forms import DirectorModelForm
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from taggit.models import Tag
from taggit.admin import TagAdmin
from tastypie.models import ApiKey
from django.utils.html import format_html
from vaas.monitor.models import BackendStatus
from vaas.manager.signals import switch_state_and_reload
from django.http import HttpResponse
from django.utils.encoding import smart_str
from vaas.external.audit import AuditableModelAdmin
try:
admin.site.unregister(Group)
admin.site.unregister(User)
admin.site.unregister(ApiKey)
admin.site.unregister(Tag)
admin.site.unregister(TagAdmin)
except:
pass
def enable_backend(modeladmin, request, queryset):
switch_state_and_reload(queryset, True)
def disable_backend(modeladmin, request, queryset):
switch_state_and_reload(queryset, False)
def switch_backend_status(modeladmin, request, queryset):
enabledSet = Backend.objects.filter(pk__in=map(lambda backend: backend.pk, queryset.filter(enabled=True)))
disabledSet = Backend.objects.filter(pk__in=map(lambda backend: backend.pk, queryset.filter(enabled=False)))
switch_state_and_reload(disabledSet, True)
switch_state_and_reload(enabledSet, False)
def export_to_csv(modeladmin, request, queryset):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=backend_list.csv'
writer = csv.writer(response, csv.excel)
response.write('\ufeff'.encode('utf8'))
writer.writerow([
smart_str("id"),
smart_str("address"),
smart_str("port"),
smart_str("director"),
smart_str("dc"),
smart_str("status"),
smart_str("enabled"),
smart_str("inherit_time_profile"),
smart_str("weight"),
smart_str("tags")
])
backend_status_list = BackendStatus.objects.all()
for obj in queryset:
status_list = list(filter(
lambda backend_status: backend_status.address == obj.address and backend_status.port == obj.port,
backend_status_list
))
status = 'unknown'
if len(status_list) == 1:
status = status_list[0].status
writer.writerow([
smart_str(obj.pk),
smart_str(obj.address),
smart_str(obj.port),
smart_str(obj.director),
smart_str(obj.dc),
smart_str(status),
smart_str(obj.enabled),
smart_str(obj.inherit_time_profile),
smart_str(obj.weight),
smart_str(obj.tags.all())
])
return response
def enable_director(modeladmin, request, queryset):
switch_state_and_reload(queryset, True)
def disable_director(modeladmin, request, queryset):
switch_state_and_reload(queryset, False)
class DirectorAdmin(AuditableModelAdmin):
search_fields = ['name', 'route_expression']
form = DirectorModelForm
list_display = (
'name', 'service', 'service_mesh_label', 'get_clusters', 'route_expression', 'probe', 'protocol',
'custom_enabled', 'virtual',)
list_filter = ['cluster__name', 'service']
actions = [enable_director, disable_director]
def get_clusters(self, obj):
"""Return string with newline separated clusters for directory passed as argument"""
return "\n".join([cluster.name for cluster in obj.cluster.all()])
get_clusters.short_description = 'Clusters'
def get_form(self, request, obj=None, **kwargs):
form = super(DirectorAdmin, self).get_form(request, obj, **kwargs)
form.base_fields['cluster'].widget.can_add_related = False
return form
def custom_enabled(self, obj):
if obj.enabled:
return format_html(
"<div class='span13 text-center'>" +
"<a class='btn btn-xs btn-success' href='#'><i class='glyphicon glyphicon-ok-sign'></i></a>" +
"</div>"
)
else:
return format_html(
"<div class='span13 text-center'><a class='btn btn-xs' href='#'>" +
"<i class='glyphicon glyphicon-ban-circle'>" +
"</i></a></div>"
)
custom_enabled.short_description = 'Enabled'
class BackendAdmin(AuditableModelAdmin):
search_fields = ['address', 'director__name', 'tags__name']
list_display = ('address', 'port', 'director', 'dc', 'is_healthy', 'custom_enabled', 'get_tags')
list_filter = ['director__name', 'director__service', 'director__cluster__name', 'dc__symbol']
actions = [enable_backend, disable_backend, switch_backend_status, export_to_csv]
fieldsets = (
(None, {
'fields': ('address', 'port', 'director', 'dc', 'weight', 'tags', 'inherit_time_profile')
}),
('Advanced options', {
'fields': ('max_connections', 'connect_timeout', 'first_byte_timeout', 'between_bytes_timeout')
}),
)
backend_status_list = []
def get_form(self, request, obj=None, **kwargs):
form = super(BackendAdmin, self).get_form(request, obj, **kwargs)
form.base_fields['dc'].widget.can_add_related = False
return form
def get_list_display(self, request):
self.backend_status_list = BackendStatus.objects.all()
return super(BackendAdmin, self).get_list_display(request)
def get_tags(self, obj):
return ", ".join([tag.name for tag in obj.tags.all()])
get_tags.short_description = 'Tags'
def custom_enabled(self, obj):
if obj.enabled:
return format_html(
"<div class='span13 text-center'>" +
"<a class='btn btn-xs btn-success' href='#'><i class='glyphicon glyphicon-ok-sign'></i></a>" +
"</div>"
)
else:
return format_html(
"<div class='span13 text-center'><a class='btn btn-xs' href='#'>" +
"<i class='glyphicon glyphicon-ban-circle'>" +
"</i></a></div>"
)
custom_enabled.short_description = 'Enabled'
def is_healthy(self, obj):
status_list = list(filter(
lambda backend_status: backend_status.address == obj.address and backend_status.port == obj.port,
self.backend_status_list
))
if len(status_list) == 1:
if status_list[0].status == 'Healthy':
return format_html(
"<div class='span13 text-center'><a class='btn btn-xs btn-success' href='#'>" +
"<i class='glyphicon glyphicon-ok'> </i></a></div>"
)
else:
return format_html(
"<div class='span13 text-center'><a class='btn btn-xs btn-danger' href='#'>" +
"<i class='glyphicon glyphicon-off'> </i></a></div>"
)
else:
return format_html(
"<div class='span13 text-center'><a class='btn btn-xs' href='#'>" +
"<i class='glyphicon glyphicon-off'></i></a></div>"
)
class Media:
js = ('js/switch-inherit-profile.js',)
class ProbeAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': ('name', 'url', 'expected_response', 'start_as_healthy')
}),
('Advanced options', {
'classes': ('collapse',),
'fields': ('interval', 'timeout', 'window', 'threshold')
}),
)
class TimeProfileAdmin(admin.ModelAdmin):
list_display = ('name', 'max_connections', 'connect_timeout', 'first_byte_timeout', 'between_bytes_timeout')
admin.site.register(Director, DirectorAdmin)
admin.site.register(Backend, BackendAdmin)
admin.site.register(Probe, ProbeAdmin)
admin.site.register(TimeProfile, TimeProfileAdmin) | vaas-app/src/vaas/manager/admin.py | import csv
from django.contrib import admin
from vaas.manager.models import Director, Backend, Probe, TimeProfile
from vaas.manager.forms import DirectorModelForm
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from taggit.models import Tag
from taggit.admin import TagAdmin
from tastypie.models import ApiKey
from django.utils.html import format_html
from vaas.monitor.models import BackendStatus
from vaas.manager.signals import switch_state_and_reload
from django.http import HttpResponse
from django.utils.encoding import smart_str
from vaas.external.audit import AuditableModelAdmin
try:
admin.site.unregister(Group)
admin.site.unregister(User)
admin.site.unregister(ApiKey)
admin.site.unregister(Tag)
admin.site.unregister(TagAdmin)
except:
pass
def enable_backend(modeladmin, request, queryset):
switch_state_and_reload(queryset, True)
def disable_backend(modeladmin, request, queryset):
switch_state_and_reload(queryset, False)
def switch_backend_status(modeladmin, request, queryset):
enabledSet = Backend.objects.filter(pk__in=map(lambda backend: backend.pk, queryset.filter(enabled=True)))
disabledSet = Backend.objects.filter(pk__in=map(lambda backend: backend.pk, queryset.filter(enabled=False)))
switch_state_and_reload(disabledSet, True)
switch_state_and_reload(enabledSet, False)
def export_to_csv(modeladmin, request, queryset):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=backend_list.csv'
writer = csv.writer(response, csv.excel)
response.write('\ufeff'.encode('utf8'))
writer.writerow([
smart_str("id"),
smart_str("address"),
smart_str("port"),
smart_str("director"),
smart_str("dc"),
smart_str("status"),
smart_str("enabled"),
smart_str("inherit_time_profile"),
smart_str("weight"),
smart_str("tags")
])
backend_status_list = BackendStatus.objects.all()
for obj in queryset:
status_list = list(filter(
lambda backend_status: backend_status.address == obj.address and backend_status.port == obj.port,
backend_status_list
))
status = 'unknown'
if len(status_list) == 1:
status = status_list[0].status
writer.writerow([
smart_str(obj.pk),
smart_str(obj.address),
smart_str(obj.port),
smart_str(obj.director),
smart_str(obj.dc),
smart_str(status),
smart_str(obj.enabled),
smart_str(obj.inherit_time_profile),
smart_str(obj.weight),
smart_str(obj.tags.all())
])
return response
def enable_director(modeladmin, request, queryset):
switch_state_and_reload(queryset, True)
def disable_director(modeladmin, request, queryset):
switch_state_and_reload(queryset, False)
class DirectorAdmin(AuditableModelAdmin):
search_fields = ['name', 'route_expression']
form = DirectorModelForm
list_display = (
'name', 'service', 'service_mesh_label', 'get_clusters', 'route_expression', 'probe', 'protocol',
'custom_enabled', 'virtual',)
list_filter = ['cluster__name', 'service']
actions = [enable_director, disable_director]
def get_clusters(self, obj):
"""Return string with newline separated clusters for directory passed as argument"""
return "\n".join([cluster.name for cluster in obj.cluster.all()])
get_clusters.short_description = 'Clusters'
def get_form(self, request, obj=None, **kwargs):
form = super(DirectorAdmin, self).get_form(request, obj, **kwargs)
form.base_fields['cluster'].widget.can_add_related = False
return form
def custom_enabled(self, obj):
if obj.enabled:
return format_html(
"<div class='span13 text-center'>" +
"<a class='btn btn-xs btn-success' href='#'><i class='glyphicon glyphicon-ok-sign'></i></a>" +
"</div>"
)
else:
return format_html(
"<div class='span13 text-center'><a class='btn btn-xs' href='#'>" +
"<i class='glyphicon glyphicon-ban-circle'>" +
"</i></a></div>"
)
custom_enabled.short_description = 'Enabled'
class BackendAdmin(AuditableModelAdmin):
search_fields = ['address', 'director__name', 'tags__name']
list_display = ('address', 'port', 'director', 'dc', 'is_healthy', 'custom_enabled', 'get_tags')
list_filter = ['director__name', 'director__service', 'director__cluster__name', 'dc__symbol']
actions = [enable_backend, disable_backend, switch_backend_status, export_to_csv]
fieldsets = (
(None, {
'fields': ('address', 'port', 'director', 'dc', 'weight', 'tags', 'inherit_time_profile')
}),
('Advanced options', {
'fields': ('max_connections', 'connect_timeout', 'first_byte_timeout', 'between_bytes_timeout')
}),
)
backend_status_list = []
def get_form(self, request, obj=None, **kwargs):
form = super(BackendAdmin, self).get_form(request, obj, **kwargs)
form.base_fields['dc'].widget.can_add_related = False
return form
def get_list_display(self, request):
self.backend_status_list = BackendStatus.objects.all()
return super(BackendAdmin, self).get_list_display(request)
def get_tags(self, obj):
return ", ".join([tag.name for tag in obj.tags.all()])
get_tags.short_description = 'Tags'
def custom_enabled(self, obj):
if obj.enabled:
return format_html(
"<div class='span13 text-center'>" +
"<a class='btn btn-xs btn-success' href='#'><i class='glyphicon glyphicon-ok-sign'></i></a>" +
"</div>"
)
else:
return format_html(
"<div class='span13 text-center'><a class='btn btn-xs' href='#'>" +
"<i class='glyphicon glyphicon-ban-circle'>" +
"</i></a></div>"
)
custom_enabled.short_description = 'Enabled'
def is_healthy(self, obj):
status_list = list(filter(
lambda backend_status: backend_status.address == obj.address and backend_status.port == obj.port,
self.backend_status_list
))
if len(status_list) == 1:
if status_list[0].status == 'Healthy':
return format_html(
"<div class='span13 text-center'><a class='btn btn-xs btn-success' href='#'>" +
"<i class='glyphicon glyphicon-ok'> </i></a></div>"
)
else:
return format_html(
"<div class='span13 text-center'><a class='btn btn-xs btn-danger' href='#'>" +
"<i class='glyphicon glyphicon-off'> </i></a></div>"
)
else:
return format_html(
"<div class='span13 text-center'><a class='btn btn-xs' href='#'>" +
"<i class='glyphicon glyphicon-off'></i></a></div>"
)
class Media:
js = ('js/switch-inherit-profile.js',)
class ProbeAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': ('name', 'url', 'expected_response', 'start_as_healthy')
}),
('Advanced options', {
'classes': ('collapse',),
'fields': ('interval', 'timeout', 'window', 'threshold')
}),
)
class TimeProfileAdmin(admin.ModelAdmin):
list_display = ('name', 'max_connections', 'connect_timeout', 'first_byte_timeout', 'between_bytes_timeout')
admin.site.register(Director, DirectorAdmin)
admin.site.register(Backend, BackendAdmin)
admin.site.register(Probe, ProbeAdmin)
admin.site.register(TimeProfile, TimeProfileAdmin) | 0.410284 | 0.07538 |
from datetime import timedelta
import logging
from aiohttp.client_exceptions import ClientError
from pyeconet import EcoNetApiInterface
from pyeconet.equipment import EquipmentType
from pyeconet.errors import (
GenericHTTPError,
InvalidCredentialsError,
InvalidResponseFormat,
PyeconetError,
)
from openpeerpower.const import CONF_EMAIL, CONF_PASSWORD, TEMP_FAHRENHEIT
from openpeerpower.core import callback
from openpeerpower.exceptions import ConfigEntryNotReady
from openpeerpower.helpers.dispatcher import dispatcher_send
from openpeerpower.helpers.entity import Entity
from openpeerpower.helpers.event import async_track_time_interval
from .const import API_CLIENT, DOMAIN, EQUIPMENT
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["climate", "binary_sensor", "sensor", "water_heater"]
PUSH_UPDATE = "econet.push_update"
INTERVAL = timedelta(minutes=60)
async def async_setup(opp, config):
"""Set up the EcoNet component."""
opp.data[DOMAIN] = {}
opp.data[DOMAIN][API_CLIENT] = {}
opp.data[DOMAIN][EQUIPMENT] = {}
return True
async def async_setup_entry(opp, config_entry):
"""Set up EcoNet as config entry."""
email = config_entry.data[CONF_EMAIL]
password = config_entry.data[CONF_PASSWORD]
try:
api = await EcoNetApiInterface.login(email, password=password)
except InvalidCredentialsError:
_LOGGER.error("Invalid credentials provided")
return False
except PyeconetError as err:
_LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady from err
try:
equipment = await api.get_equipment_by_type(
[EquipmentType.WATER_HEATER, EquipmentType.THERMOSTAT]
)
except (ClientError, GenericHTTPError, InvalidResponseFormat) as err:
raise ConfigEntryNotReady from err
opp.data[DOMAIN][API_CLIENT][config_entry.entry_id] = api
opp.data[DOMAIN][EQUIPMENT][config_entry.entry_id] = equipment
opp.config_entries.async_setup_platforms(config_entry, PLATFORMS)
api.subscribe()
def update_published():
"""Handle a push update."""
dispatcher_send(opp, PUSH_UPDATE)
for _eqip in equipment[EquipmentType.WATER_HEATER]:
_eqip.set_update_callback(update_published)
for _eqip in equipment[EquipmentType.THERMOSTAT]:
_eqip.set_update_callback(update_published)
async def resubscribe(now):
"""Resubscribe to the MQTT updates."""
await opp.async_add_executor_job(api.unsubscribe)
api.subscribe()
async def fetch_update(now):
"""Fetch the latest changes from the API."""
await api.refresh_equipment()
config_entry.async_on_unload(async_track_time_interval(opp, resubscribe, INTERVAL))
config_entry.async_on_unload(
async_track_time_interval(opp, fetch_update, INTERVAL + timedelta(minutes=1))
)
return True
async def async_unload_entry(opp, entry):
"""Unload a EcoNet config entry."""
unload_ok = await opp.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
opp.data[DOMAIN][API_CLIENT].pop(entry.entry_id)
opp.data[DOMAIN][EQUIPMENT].pop(entry.entry_id)
return unload_ok
class EcoNetEntity(Entity):
"""Define a base EcoNet entity."""
def __init__(self, econet):
"""Initialize."""
self._econet = econet
async def async_added_to_opp(self):
"""Subscribe to device events."""
await super().async_added_to_opp()
self.async_on_remove(
self.opp.helpers.dispatcher.async_dispatcher_connect(
PUSH_UPDATE, self.on_update_received
)
)
@callback
def on_update_received(self):
"""Update was pushed from the ecoent API."""
self.async_write_op_state()
@property
def available(self):
"""Return if the the device is online or not."""
return self._econet.connected
@property
def device_info(self):
"""Return device registry information for this entity."""
return {
"identifiers": {(DOMAIN, self._econet.device_id)},
"manufacturer": "Rheem",
"name": self._econet.device_name,
}
@property
def name(self):
"""Return the name of the entity."""
return self._econet.device_name
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return f"{self._econet.device_id}_{self._econet.device_name}"
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False | openpeerpower/components/econet/__init__.py | from datetime import timedelta
import logging
from aiohttp.client_exceptions import ClientError
from pyeconet import EcoNetApiInterface
from pyeconet.equipment import EquipmentType
from pyeconet.errors import (
GenericHTTPError,
InvalidCredentialsError,
InvalidResponseFormat,
PyeconetError,
)
from openpeerpower.const import CONF_EMAIL, CONF_PASSWORD, TEMP_FAHRENHEIT
from openpeerpower.core import callback
from openpeerpower.exceptions import ConfigEntryNotReady
from openpeerpower.helpers.dispatcher import dispatcher_send
from openpeerpower.helpers.entity import Entity
from openpeerpower.helpers.event import async_track_time_interval
from .const import API_CLIENT, DOMAIN, EQUIPMENT
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["climate", "binary_sensor", "sensor", "water_heater"]
PUSH_UPDATE = "econet.push_update"
INTERVAL = timedelta(minutes=60)
async def async_setup(opp, config):
"""Set up the EcoNet component."""
opp.data[DOMAIN] = {}
opp.data[DOMAIN][API_CLIENT] = {}
opp.data[DOMAIN][EQUIPMENT] = {}
return True
async def async_setup_entry(opp, config_entry):
"""Set up EcoNet as config entry."""
email = config_entry.data[CONF_EMAIL]
password = config_entry.data[CONF_PASSWORD]
try:
api = await EcoNetApiInterface.login(email, password=password)
except InvalidCredentialsError:
_LOGGER.error("Invalid credentials provided")
return False
except PyeconetError as err:
_LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady from err
try:
equipment = await api.get_equipment_by_type(
[EquipmentType.WATER_HEATER, EquipmentType.THERMOSTAT]
)
except (ClientError, GenericHTTPError, InvalidResponseFormat) as err:
raise ConfigEntryNotReady from err
opp.data[DOMAIN][API_CLIENT][config_entry.entry_id] = api
opp.data[DOMAIN][EQUIPMENT][config_entry.entry_id] = equipment
opp.config_entries.async_setup_platforms(config_entry, PLATFORMS)
api.subscribe()
def update_published():
"""Handle a push update."""
dispatcher_send(opp, PUSH_UPDATE)
for _eqip in equipment[EquipmentType.WATER_HEATER]:
_eqip.set_update_callback(update_published)
for _eqip in equipment[EquipmentType.THERMOSTAT]:
_eqip.set_update_callback(update_published)
async def resubscribe(now):
"""Resubscribe to the MQTT updates."""
await opp.async_add_executor_job(api.unsubscribe)
api.subscribe()
async def fetch_update(now):
"""Fetch the latest changes from the API."""
await api.refresh_equipment()
config_entry.async_on_unload(async_track_time_interval(opp, resubscribe, INTERVAL))
config_entry.async_on_unload(
async_track_time_interval(opp, fetch_update, INTERVAL + timedelta(minutes=1))
)
return True
async def async_unload_entry(opp, entry):
"""Unload a EcoNet config entry."""
unload_ok = await opp.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
opp.data[DOMAIN][API_CLIENT].pop(entry.entry_id)
opp.data[DOMAIN][EQUIPMENT].pop(entry.entry_id)
return unload_ok
class EcoNetEntity(Entity):
"""Define a base EcoNet entity."""
def __init__(self, econet):
"""Initialize."""
self._econet = econet
async def async_added_to_opp(self):
"""Subscribe to device events."""
await super().async_added_to_opp()
self.async_on_remove(
self.opp.helpers.dispatcher.async_dispatcher_connect(
PUSH_UPDATE, self.on_update_received
)
)
@callback
def on_update_received(self):
"""Update was pushed from the ecoent API."""
self.async_write_op_state()
@property
def available(self):
"""Return if the the device is online or not."""
return self._econet.connected
@property
def device_info(self):
"""Return device registry information for this entity."""
return {
"identifiers": {(DOMAIN, self._econet.device_id)},
"manufacturer": "Rheem",
"name": self._econet.device_name,
}
@property
def name(self):
"""Return the name of the entity."""
return self._econet.device_name
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return f"{self._econet.device_id}_{self._econet.device_name}"
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False | 0.750553 | 0.079639 |
from flask_wtf import FlaskForm
from wtforms import SelectField, DecimalField, StringField, TextAreaField, DateField, FileField, MultipleFileField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Length, Email, EqualTo
persons_choices = list(range(1,13)) # From 1 to 12 people choice
for i in range(0, len(persons_choices)):
persons_choices[i] = (str(persons_choices[i]), str(persons_choices[i]))
class SearchForm(FlaskForm):
address = StringField('Posizione')
start_date = DateField('Inizio Soggiorno', format='%d/%m/%Y', validators=[DataRequired()])
end_date = DateField('Fine Soggiorno', format='%d/%m/%Y', validators=[DataRequired()])
persons = SelectField('Numero di Persone', choices=persons_choices, validators=[DataRequired()])
submit = SubmitField('Cerca')
class RegistrationForm(FlaskForm):
name = StringField('Nome', validators=[DataRequired(), Length(min=2, max=30)])
surname = StringField('Cognome', validators=[DataRequired(), Length(min=2, max=30)])
username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email', validators=[DataRequired(), Length(min=2,max=40), Email()])
privilege = SelectField('Tipologia', choices=[('True', 'Proprietario'), ('False', 'Affittuario')], validators=[DataRequired()])
birth_date = DateField('Data di nascita', format='%d/%m/%Y')
password = PasswordField('Password', validators=[DataRequired(), Length(min=5,max=20)])
confirm_password = PasswordField('<PASSWORD>', validators=[DataRequired(), EqualTo('password')])
register = SubmitField('Registrati')
class LoginForm(FlaskForm):
username_email = StringField('Username/Email', validators=[DataRequired(), Length(min=2,max=40)])
password = PasswordField('Password', validators=[DataRequired(), Length(min=5,max=20)])
login = SubmitField('Accedi')
class ProfilePictureForm(FlaskForm):
image = FileField()
class AddRoomForm(FlaskForm):
name = StringField('Nome', validators=[DataRequired(), Length(min=5, max=20)])
description = TextAreaField('Descrizione opzionale')
address = StringField('Posizione', validators=[DataRequired()])
available_from = DateField('Inizio disponibilità', format='%d/%m/%Y')
available_to = DateField('Fine disponibilità', format='%d/%m/%Y')
price = DecimalField('Prezzo', validators=[DataRequired()])
max_persons = SelectField('Numero massimo di Persone', choices=persons_choices, validators=[DataRequired()])
pictures = MultipleFileField('Immagini della stanza')
submit = SubmitField('Aggiungi stanza')
class PrenotationForm(FlaskForm):
start_date = DateField('Inizio prenotazione', format='%d/%m/%Y', validators=[DataRequired()])
end_date = DateField('Fine prenotazione', format='%d/%m/%Y', validators=[DataRequired()])
persons = SelectField('Numero di Persone', choices=persons_choices, validators=[DataRequired()])
submit = SubmitField('Prenota') | ecommerce/forms.py | from flask_wtf import FlaskForm
from wtforms import SelectField, DecimalField, StringField, TextAreaField, DateField, FileField, MultipleFileField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Length, Email, EqualTo
persons_choices = list(range(1,13)) # From 1 to 12 people choice
for i in range(0, len(persons_choices)):
persons_choices[i] = (str(persons_choices[i]), str(persons_choices[i]))
class SearchForm(FlaskForm):
address = StringField('Posizione')
start_date = DateField('Inizio Soggiorno', format='%d/%m/%Y', validators=[DataRequired()])
end_date = DateField('Fine Soggiorno', format='%d/%m/%Y', validators=[DataRequired()])
persons = SelectField('Numero di Persone', choices=persons_choices, validators=[DataRequired()])
submit = SubmitField('Cerca')
class RegistrationForm(FlaskForm):
name = StringField('Nome', validators=[DataRequired(), Length(min=2, max=30)])
surname = StringField('Cognome', validators=[DataRequired(), Length(min=2, max=30)])
username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email', validators=[DataRequired(), Length(min=2,max=40), Email()])
privilege = SelectField('Tipologia', choices=[('True', 'Proprietario'), ('False', 'Affittuario')], validators=[DataRequired()])
birth_date = DateField('Data di nascita', format='%d/%m/%Y')
password = PasswordField('Password', validators=[DataRequired(), Length(min=5,max=20)])
confirm_password = PasswordField('<PASSWORD>', validators=[DataRequired(), EqualTo('password')])
register = SubmitField('Registrati')
class LoginForm(FlaskForm):
username_email = StringField('Username/Email', validators=[DataRequired(), Length(min=2,max=40)])
password = PasswordField('Password', validators=[DataRequired(), Length(min=5,max=20)])
login = SubmitField('Accedi')
class ProfilePictureForm(FlaskForm):
image = FileField()
class AddRoomForm(FlaskForm):
name = StringField('Nome', validators=[DataRequired(), Length(min=5, max=20)])
description = TextAreaField('Descrizione opzionale')
address = StringField('Posizione', validators=[DataRequired()])
available_from = DateField('Inizio disponibilità', format='%d/%m/%Y')
available_to = DateField('Fine disponibilità', format='%d/%m/%Y')
price = DecimalField('Prezzo', validators=[DataRequired()])
max_persons = SelectField('Numero massimo di Persone', choices=persons_choices, validators=[DataRequired()])
pictures = MultipleFileField('Immagini della stanza')
submit = SubmitField('Aggiungi stanza')
class PrenotationForm(FlaskForm):
start_date = DateField('Inizio prenotazione', format='%d/%m/%Y', validators=[DataRequired()])
end_date = DateField('Fine prenotazione', format='%d/%m/%Y', validators=[DataRequired()])
persons = SelectField('Numero di Persone', choices=persons_choices, validators=[DataRequired()])
submit = SubmitField('Prenota') | 0.358129 | 0.199639 |
import ctypes
from numpy.ctypeslib import ndpointer
import os
import time
def getMatching_fast(numNodes,nodes1, nodes2, weights):
numEdges=len(nodes1);
PMlib=ctypes.CDLL("%s/PMlib.so"%"/".join((os.path.realpath(__file__)).split("/")[:-1]))
PMlib.pyMatching.argtypes = [ctypes.c_int,ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)]
PMlib.pyMatching.restype = ndpointer(dtype=ctypes.c_int, shape=(numNodes,))
# initialize ctypes array and fill with edge data
n1=(ctypes.c_int*numEdges)();
n2=(ctypes.c_int*numEdges)();
w=(ctypes.c_int*numEdges)();
for i in range(numEdges):
n1[i],n2[i],w[i]=nodes1[i],nodes2[i],weights[i]
result=PMlib.pyMatching(ctypes.c_int(numNodes),ctypes.c_int(numEdges),n1,n2,w)
return result
def getMatching(numNodes,graphArray):
mtime0 = time.time()
numEdges=len(graphArray);
PMlib=ctypes.CDLL("%s/PMlib.so"%"/".join((os.path.realpath(__file__)).split("/")[:-1]))
PMlib.pyMatching.argtypes = [ctypes.c_int,ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)]
PMlib.pyMatching.restype = ndpointer(dtype=ctypes.c_int, shape=(numNodes,))
# initialize ctypes array and fill with edge data
nodes1=(ctypes.c_int*numEdges)();
nodes2=(ctypes.c_int*numEdges)();
weights=(ctypes.c_int*numEdges)();
#c_int_array = ctypes.c_int*numEdges
# nodes1 = c_int_array(*[graphArray[i][0] for i in range(numEdges)])
for i in range(numEdges):
nodes1[i]=graphArray[i][0]
nodes2[i]=graphArray[i][1]
weights[i]=graphArray[i][2]
# mtime1 = time.time()
# print "matching overhead = ", mtime1-mtime0
result=PMlib.pyMatching(ctypes.c_int(numNodes),ctypes.c_int(numEdges),nodes1,nodes2,weights)
# mtime2 = time.time()
# print "matching time = ",mtime2 - mtime1
return result
# pyInterface.o
# example.o
# misc.o
# PMduals.o
# PMexpand.o
# PMinit.o
# PMinterface.o
# PMmain.o
# PMrepair.o
# PMshrink.o
# MinCost/MinCost.o
# GEOM/GPMinit.o
# GEOM/GPMinterface.o
# GEOM/GPMkdtree.o
# GEOM/GPMmain.o
#compile all these files as:
# g++ -c -fPIC filename.cpp -lrt
# then compile all .o files into a shared library
# g++ -shared filename1.o filename2.o .... -o PMlib.so -lrt
# NOTE: the -lrt must come AFTER the filename | project/CodePy2/extra/blossom5/pyMatch.py | import ctypes
from numpy.ctypeslib import ndpointer
import os
import time
def getMatching_fast(numNodes,nodes1, nodes2, weights):
numEdges=len(nodes1);
PMlib=ctypes.CDLL("%s/PMlib.so"%"/".join((os.path.realpath(__file__)).split("/")[:-1]))
PMlib.pyMatching.argtypes = [ctypes.c_int,ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)]
PMlib.pyMatching.restype = ndpointer(dtype=ctypes.c_int, shape=(numNodes,))
# initialize ctypes array and fill with edge data
n1=(ctypes.c_int*numEdges)();
n2=(ctypes.c_int*numEdges)();
w=(ctypes.c_int*numEdges)();
for i in range(numEdges):
n1[i],n2[i],w[i]=nodes1[i],nodes2[i],weights[i]
result=PMlib.pyMatching(ctypes.c_int(numNodes),ctypes.c_int(numEdges),n1,n2,w)
return result
def getMatching(numNodes,graphArray):
mtime0 = time.time()
numEdges=len(graphArray);
PMlib=ctypes.CDLL("%s/PMlib.so"%"/".join((os.path.realpath(__file__)).split("/")[:-1]))
PMlib.pyMatching.argtypes = [ctypes.c_int,ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)]
PMlib.pyMatching.restype = ndpointer(dtype=ctypes.c_int, shape=(numNodes,))
# initialize ctypes array and fill with edge data
nodes1=(ctypes.c_int*numEdges)();
nodes2=(ctypes.c_int*numEdges)();
weights=(ctypes.c_int*numEdges)();
#c_int_array = ctypes.c_int*numEdges
# nodes1 = c_int_array(*[graphArray[i][0] for i in range(numEdges)])
for i in range(numEdges):
nodes1[i]=graphArray[i][0]
nodes2[i]=graphArray[i][1]
weights[i]=graphArray[i][2]
# mtime1 = time.time()
# print "matching overhead = ", mtime1-mtime0
result=PMlib.pyMatching(ctypes.c_int(numNodes),ctypes.c_int(numEdges),nodes1,nodes2,weights)
# mtime2 = time.time()
# print "matching time = ",mtime2 - mtime1
return result
# pyInterface.o
# example.o
# misc.o
# PMduals.o
# PMexpand.o
# PMinit.o
# PMinterface.o
# PMmain.o
# PMrepair.o
# PMshrink.o
# MinCost/MinCost.o
# GEOM/GPMinit.o
# GEOM/GPMinterface.o
# GEOM/GPMkdtree.o
# GEOM/GPMmain.o
#compile all these files as:
# g++ -c -fPIC filename.cpp -lrt
# then compile all .o files into a shared library
# g++ -shared filename1.o filename2.o .... -o PMlib.so -lrt
# NOTE: the -lrt must come AFTER the filename | 0.119794 | 0.224013 |
# Export this package's modules as members:
from .accelerator import *
from .bandwidth_package import *
from .bandwidth_package_attachment import *
from .endpoint_group import *
from .forwarding_rule import *
from .get_accelerators import *
from .get_bandwidth_packages import *
from .get_endpoint_groups import *
from .get_forwarding_rules import *
from .get_ip_sets import *
from .get_listeners import *
from .ip_set import *
from .listener import *
from ._inputs import *
from . import outputs
def _register_module():
    """Register this package's resource types with the Pulumi runtime."""
    import pulumi
    from .. import _utilities

    class Module(pulumi.runtime.ResourceModule):
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # Dispatch table mapping a Pulumi type token to its resource class.
            constructors = {
                "alicloud:ga/accelerator:Accelerator": Accelerator,
                "alicloud:ga/bandwidthPackage:BandwidthPackage": BandwidthPackage,
                "alicloud:ga/bandwidthPackageAttachment:BandwidthPackageAttachment": BandwidthPackageAttachment,
                "alicloud:ga/endpointGroup:EndpointGroup": EndpointGroup,
                "alicloud:ga/forwardingRule:ForwardingRule": ForwardingRule,
                "alicloud:ga/ipSet:IpSet": IpSet,
                "alicloud:ga/listener:Listener": Listener,
            }
            resource_class = constructors.get(typ)
            if resource_class is None:
                raise Exception(f"unknown resource type {typ}")
            return resource_class(name, pulumi.ResourceOptions(urn=urn))

    _module_instance = Module()
    for mod_token in (
        "ga/accelerator",
        "ga/bandwidthPackage",
        "ga/bandwidthPackageAttachment",
        "ga/endpointGroup",
        "ga/forwardingRule",
        "ga/ipSet",
        "ga/listener",
    ):
        pulumi.runtime.register_resource_module("alicloud", mod_token, _module_instance)


_register_module()
# Export this package's modules as members:
from .accelerator import *
from .bandwidth_package import *
from .bandwidth_package_attachment import *
from .endpoint_group import *
from .forwarding_rule import *
from .get_accelerators import *
from .get_bandwidth_packages import *
from .get_endpoint_groups import *
from .get_forwarding_rules import *
from .get_ip_sets import *
from .get_listeners import *
from .ip_set import *
from .listener import *
from ._inputs import *
from . import outputs
def _register_module():
    """Register this package's resource types with the Pulumi runtime."""
    import pulumi
    from .. import _utilities

    class Module(pulumi.runtime.ResourceModule):
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # Dispatch table mapping a Pulumi type token to its resource class.
            constructors = {
                "alicloud:ga/accelerator:Accelerator": Accelerator,
                "alicloud:ga/bandwidthPackage:BandwidthPackage": BandwidthPackage,
                "alicloud:ga/bandwidthPackageAttachment:BandwidthPackageAttachment": BandwidthPackageAttachment,
                "alicloud:ga/endpointGroup:EndpointGroup": EndpointGroup,
                "alicloud:ga/forwardingRule:ForwardingRule": ForwardingRule,
                "alicloud:ga/ipSet:IpSet": IpSet,
                "alicloud:ga/listener:Listener": Listener,
            }
            resource_class = constructors.get(typ)
            if resource_class is None:
                raise Exception(f"unknown resource type {typ}")
            return resource_class(name, pulumi.ResourceOptions(urn=urn))

    _module_instance = Module()
    for mod_token in (
        "ga/accelerator",
        "ga/bandwidthPackage",
        "ga/bandwidthPackageAttachment",
        "ga/endpointGroup",
        "ga/forwardingRule",
        "ga/ipSet",
        "ga/listener",
    ):
        pulumi.runtime.register_resource_module("alicloud", mod_token, _module_instance)


_register_module()
from datetime import datetime
from graph import Graph
class NearestNeighbour:
    """Greedy nearest-neighbour heuristic for building a closed tour.

    Starting from ``start``, repeatedly hop to the cheapest unvisited vertex,
    then return to the start.  Results accumulate in ``followedRoute`` (the
    visit order, closed back to the start) and ``routeCost`` (total edge cost).
    """

    def __init__(self, start, graph):
        self.start = start
        self.graph = graph
        self.followedRoute = []
        self.routeCost = 0

    def nearest_neighbour(self, origin, neighbours):
        """Return the member of *neighbours* with the cheapest edge from *origin*.

        Ties resolve to the earliest candidate; an empty pool yields ``None``.
        """
        if not neighbours:
            return None
        return min(neighbours, key=lambda town: self.graph.cost(origin, town))

    def solve(self):
        """Run the heuristic, populating ``followedRoute`` and ``routeCost``."""
        remaining = list(self.graph.vertices())  # vertices still to visit
        current = self.start
        self.followedRoute.append(current)
        remaining.remove(current)

        while remaining:
            hop = self.nearest_neighbour(current, remaining)
            remaining.remove(hop)  # mark as visited
            self.routeCost += self.graph.cost(current, hop)
            self.followedRoute.append(hop)
            current = hop

        # Close the tour back to the starting vertex.
        self.routeCost += self.graph.cost(current, self.start)
        self.followedRoute.append(self.start)
def test(max_runs=5):
    """Benchmark the solver *max_runs* times against ``data/test.json``.

    :param max_runs: number of independent runs to execute.
    :return: list of ``[elapsed, solution]`` pairs, where *elapsed* is a
        ``datetime.timedelta`` and *solution* the solved ``NearestNeighbour``.
    """
    results = []
    for run in range(max_runs):
        print('Run:', run)
        graph = Graph('data/test.json')
        solution = NearestNeighbour('0', graph)

        start_time = datetime.now()
        solution.solve()
        elapsed = datetime.now() - start_time

        # elapsed is a timedelta, not milliseconds; report seconds explicitly.
        print("Elapsed Time:", elapsed.total_seconds(), "s")
        print("Cost:", solution.routeCost)
        print("Path:", solution.followedRoute)
        results.append([elapsed, solution])
    return results


# Guard the entry point so importing this module does not trigger a run.
if __name__ == '__main__':
    test()
from graph import Graph
class NearestNeighbour:
def __init__(self, start, graph):
'''
Solution of shortest path problem using nearest neighbour algorithm
'''
self.start = start
self.graph = graph
self.followedRoute = []
self.routeCost = 0
def nearest_neighbour(self, origin, neighbours):
'''
Returns the nearest neighbour of a vertice origin
'''
lowest_cost = float('Inf')
nearest = None
for neighbour in neighbours:
cost = self.graph.cost(origin, neighbour)
if cost < lowest_cost:
lowest_cost = cost
nearest = neighbour
return nearest
def solve(self):
'''
Executes the algorithm
'''
missing = list(self.graph.vertices()) # towns to be visited
currentTown = self.start
self.followedRoute.append(currentTown)
missing.remove(currentTown)
while len(missing) > 0:
closest = self.nearest_neighbour(currentTown, missing)
self.followedRoute.append(closest)
self.routeCost += self.graph.cost(currentTown, closest)
currentTown = closest
missing.remove(currentTown) # remove visited town
# add the last one
self.followedRoute.append(self.start)
self.routeCost += self.graph.cost(currentTown, self.start)
def test(max_runs=5):
results = []
for run in range(max_runs):
print('Run:', run)
graph = Graph('data/test.json')
solution = NearestNeighbour('0', graph)
start_time = datetime.now()
solution.solve()
end_time = datetime.now()
elapsed_time = end_time - start_time
print("Elapsed Time:", str(elapsed_time), "ms")
print("Cost:", solution.routeCost)
print("Path:", solution.followedRoute)
results.append([elapsed_time, solution])
return results
test() | 0.805594 | 0.495545 |
from __future__ import unicode_literals
"""Convienent utilities."""
# ## Imports
import sys
from codecs import iterencode
from inspect import isfunction, isclass
from operator import methodcaller
from collections import deque, namedtuple
from collections.abc import Sized, Iterable
from pkg_resources import iter_entry_points
from xml.sax.saxutils import quoteattr
try: # pragma: no cover
from html.parser import HTMLParser
except ImportError: # pragma: no cover
from HTMLParser import HTMLParser
# ## Python Cross-Compatibility
#
# These allow us to detect relevant version differences for code generation, and overcome some of the minor
# differences in labels between Python 2 and Python 3 compatible runtimes.
#
# The differences, in practice, are minor, and are easily overcome through a small block of version-dependant
# code. Handily, even built-in labels are not sacrosanct; they can be easily assigned to and re-mapped.
#
try: # Python 2
from types import StringTypes as stringy
try:
from cStringIO import StringIO
except: # pragma: no cover
from StringIO import StringIO # This never really happens. Still, nice to be defensive.
bytes = str
str = unicode
py = 2
reduce = reduce
except: # Python 3
from io import StringIO
stringy = str
bytes = bytes
str = str
py = 3
# There are some additional complications for the Pypy runtime.
try:
from sys import pypy_version_info
pypy = True
except ImportError:
pypy = False
# ## Type Definitions

# A tuple representing a single step of fancy iteration, as yielded by `iterate()`:
# (first, last, index, total, value).
Iteration = namedtuple('Iteration', ['first', 'last', 'index', 'total', 'value'])
# ## Simple Utility Functions
def stream(input, encoding=None, errors='strict'):
    """Filter falsy chunks out of a template generator, optionally encoding them.

    Used internally by ``cinje.flatten``; the result is directly usable as a
    WSGI response body.
    """
    chunks = (chunk for chunk in input if chunk)  # Drop None (empty wrappers) and empty chunks.

    if not encoding:
        return chunks

    # Encode lazily, chunk by chunk, rather than materializing the stream.
    return iterencode(chunks, encoding, errors=errors)
def flatten(input, file=None, encoding=None, errors='strict'):
    """Collapse a cinje chunk stream into a single string, or write it to *file*.

    With no *file* the joined result is returned: a binary string when an
    *encoding* is given, the native text type otherwise.  With a *file*, chunks
    are written through repeated ``file.write()`` calls and the number of
    characters (or bytes) written is returned.  *errors* is passed through to
    the encoder.

    We can highly recommend using the various streaming IO containers available
    in the [`io`](https://docs.python.org/3/library/io.html) module, though
    [`tempfile`](https://docs.python.org/3/library/tempfile.html) classes are
    also quite useful.
    """
    chunks = stream(input, encoding, errors)

    if file is None:
        # Join in memory; joiner type must match the chunk type.
        return (b'' if encoding else '').join(chunks)

    written = 0
    for piece in chunks:
        file.write(piece)
        written += len(piece)

    return written
def fragment(string, name="anonymous", **context):
    """Translate a template fragment into a callable function.

    **Note:** Use of this function is discouraged everywhere except tests, as no caching is implemented at this
    time. Only one function may be declared, either manually, or automatically. If automatic definition is chosen
    the resulting function takes no arguments. Additional keyword arguments are passed through as global variables.
    """
    if isinstance(string, bytes):
        string = string.decode('utf-8')

    # If the fragment already declares a template function, translate it as-is and recover
    # the generated name afterwards; otherwise wrap the text in a synthetic `: def` block.
    if ": def" in string or ":def" in string:
        code = string.encode('utf8').decode('cinje')  # The 'cinje' codec translates template source to Python.
        name = None
    else:
        code = ": def {name}\n\n{string}".format(
            name = name,
            string = string,
        ).encode('utf8').decode('cinje')

    # Execute the generated module code in a scratch namespace seeded with caller-provided globals.
    environ = dict(context)
    exec(code, environ)

    if name is None:  # We need to dig it out of the `__tmpl__` list.
        if __debug__ and not environ.get('__tmpl__', None):
            raise RuntimeError("Template fragment does not contain a function: " + repr(environ.get('__tmpl__', None)) + \
                    "\n\n" + code)
        return environ[environ['__tmpl__'][-1]]  # Super secret sauce: you _can_ define more than one function...

    return environ[name]
def interruptable(iterable):
    """Yield items from *iterable*, stopping cleanly at the first ``None``.

    Allows easy catching of a generator interrupting operation when using
    ``yield from``.
    """
    for item in iterable:
        if item is None:
            break
        yield item
def iterate(obj):
    """Loop over an iterable and track progress, including first and last state.

    On each iteration yield an Iteration named tuple with the first and last flags, current element index, total
    iterable length (if possible to acquire), and value, in that order.

        for iteration in iterate(something):
            iteration.value  # Do something.

    You can unpack these safely:

        for first, last, index, total, value in iterate(something):
            pass

    If you want to unpack the values you are iterating across, you can by wrapping the nested unpacking in
    parenthesis:

        for first, last, index, total, (foo, bar, baz) in iterate(something):
            pass

    Even if the length of the iterable can't be reliably determined this function will still capture the "last"
    state of the final loop iteration. (Basically: this works with generators.)

    This process is about 10x slower than simple enumeration on CPython 3.4, so only use it where you actually
    need to track state. Use `enumerate()` elsewhere.
    """
    # NOTE(review): `global` plus self-assignment copies the `next` builtin and the
    # `Iteration` global into module globals — presumably a lookup-speed hack; confirm
    # before simplifying, as removing it changes module-level state.
    global next, Iteration
    next = next
    Iteration = Iteration

    total = len(obj) if isinstance(obj, Sized) else None  # Length is only known for sized containers.
    iterator = iter(obj)
    first = True
    last = False
    i = 0

    try:
        value = next(iterator)
    except StopIteration:  # Empty iterable: yield nothing at all.
        return

    while True:
        try:
            next_value = next(iterator)  # Look ahead one element to detect the final iteration.
        except StopIteration:
            last = True

        yield Iteration(first, last, i, total, value)
        if last: return

        value = next_value
        i += 1
        first = False
def xmlargs(_source=None, **values):
    """Render keyword arguments as a string of XML/HTML attributes.

    Key mangling: trailing underscores are stripped (``class_`` → ``class``), double
    underscores become colons and single underscores become hyphens.  Keys starting
    with an underscore are skipped, as are falsy values other than zero; ``True``
    renders as a bare (value-less) attribute; non-string iterables are joined with
    spaces.  Entries in *_source*, when given, override the keyword defaults.
    """
    from cinje.helpers import bless  # Imported here, presumably to avoid a circular import — confirm.

    # Optimize by binding these names to the local scope, saving a lookup on each call.
    # NOTE(review): `global` + self-assignment promotes builtin/module lookups to module
    # globals; appears to be a speed hack — confirm before simplifying.
    global str, Iterable, stringy
    str = str
    Iterable = Iterable
    stringy = stringy

    ejoin = " ".join
    parts = []
    pappend = parts.append

    # If a data source is provided it overrides the keyword arguments which are treated as defaults.
    if _source:
        values.update(_source)

    for k in sorted(values):
        # We technically allow non-string values for keys. They're just converted to strings first.
        key = str(k).rstrip('_').replace('__', ':').replace('_', '-')
        value = values[k]

        # We skip empty, None, False, and other falsy values other than zero.
        if k[0] == '_' or (not value and (value is False or value != 0)):  # False == 0, so, uh, work around that.
            continue

        if value is True:  # For explicitly True values, we don't have a value for the attribute.
            pappend(key)
            continue

        # Non-string iterables (such as lists, sets, tuples, etc.) are treated as space-separated strings.
        if isinstance(value, Iterable) and not isinstance(value, stringy):
            value = ejoin(str(i) for i in value)

        pappend(key + "=" + quoteattr(str(value)))  # quoteattr handles quoting and escaping.

    return bless(" " + ejoin(parts)) if parts else ''
def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
    """Chunkify and "tag" a block of text into plain text and code sections.

    The ``None`` key in *mapping* represents plain text sections; the two-character
    prefixes select the substitution kind.  Chunks are yielded as clones of *line*
    whose ``kind`` is the tag and whose ``line`` is the matched text.  (The shared
    default *mapping* is read-only here, so the mutable default is safe.)
    """
    skipping = 0  # How many closing braces will we need to skip (current nesting depth)?
    start = None  # Starting position of the current `x{...}` match; None while in plain text.
    last = 0  # Index just past the end of the last chunk emitted.
    i = 0
    text = line.line

    while i < len(text):
        if start is not None:  # Inside an `x{...}` expression: scan for its matching closing brace.
            if text[i] == '{':
                skipping += 1
            elif text[i] == '}':
                if skipping:
                    skipping -= 1
                else:
                    # The two characters immediately before `start` identify the chunk kind.
                    yield line.clone(kind=mapping[text[start-2:start]], line=text[start:i])
                    start = None
                    last = i = i + 1
                    continue
        elif text[i:i+2] in mapping:  # A new substitution opens here.
            if last is not None and last != i:  # Flush the preceding plain-text run first.
                yield line.clone(kind=mapping[None], line=text[last:i])
            last = None
            start = i = i + 2
            continue
        i += 1

    # Trailing plain text after the final substitution, if any.
    # NOTE(review): an unterminated substitution leaves last=None here, making this
    # comparison raise on Python 3 — confirm inputs are always brace-balanced.
    if last < len(text):
        yield line.clone(kind=mapping[None], line=text[last:])
def ensure_buffer(context, separate=True):
    """Emit the generated lines that initialize the text buffer, at most once.

    Produces nothing unless buffering is enabled ('buffer' flag present) and the
    buffer has not already been emitted ('text' flag absent).  When *separate*
    is true a leading blank line is emitted first.
    """
    if 'text' in context.flag or 'buffer' not in context.flag:
        return

    prologue = [Line(0, "")] if separate else []
    prologue.append(Line(0, "_buffer = []"))

    if not pypy:  # On CPython, pre-bind the buffer methods for faster writes.
        prologue.append(Line(0, "__w, __ws = _buffer.extend, _buffer.append"))

    prologue.append(Line(0, ""))

    for emitted in prologue:
        yield emitted

    context.flag.add('text')
# ## Common Classes
class Line(object):
    """A rich description for a line of input, allowing for annotation."""

    __slots__ = ('number', 'line', 'scope', 'kind', 'continued')

    def __init__(self, number, line, scope=None, kind=None):
        # number: 1-based source line number (0 for synthesized lines).
        # line: the text content; bytes are decoded as UTF-8.
        # scope: indentation depth, or None when not yet assigned.
        # kind: classification tag ('text', 'code', 'comment', ...); inferred when omitted.
        if isinstance(line, bytes):
            line = line.decode('utf-8')

        self.number = number
        self.line = line
        self.scope = scope
        self.kind = kind
        self.continued = self.stripped.endswith('\\')  # Explicit line-continuation marker.

        if not kind: self.process()  # Classify only when the caller didn't supply a kind.

        super(Line, self).__init__()

    def process(self):
        """Infer this line's kind from its leading characters, normalizing code lines."""
        if self.stripped.startswith('#') and not self.stripped.startswith('#{'):  # `#{` is substitution, not comment.
            self.kind = 'comment'
        elif self.stripped.startswith(':'):
            self.kind = 'code'
            self.line = self.stripped[1:].lstrip()  # Drop the `:` prefix from code lines.
        else:
            self.kind = 'text'

    @property
    def stripped(self):
        """The line text with surrounding whitespace removed."""
        return self.line.strip()

    @property
    def partitioned(self):
        """The line split into (first word, remainder) on the first space."""
        prefix, _, remainder = self.stripped.partition(' ')
        return prefix.rstrip(), remainder.lstrip()

    def __repr__(self):
        return '{0.__class__.__name__}({0.number}, {0.kind}, "{0.stripped}")'.format(self)

    def __bytes__(self):
        return str(self).encode('utf8')

    def __str__(self):
        # Re-indent with tabs according to scope, when a scope has been assigned.
        if self.scope is None:
            return self.line
        return '\t' * self.scope + self.line.lstrip()

    if py == 2:  # pragma: no cover
        # On Python 2 the text/bytes dunders are swapped to match its native str semantics.
        __unicode__ = __str__
        __str__ = __bytes__
        del __bytes__

    def clone(self, **kw):
        """Return a copy of this line with selected attributes overridden via keywords."""
        values = dict(
            number = self.number,
            line = self.line,
            scope = self.scope,
            kind = self.kind,
        )
        values.update(kw)
        instance = self.__class__(**values)
        return instance
class Lines(object):
    """Iterate input lines of source, with the ability to push lines back."""

    __slots__ = ['Line', 'source', 'buffer']

    def __init__(self, input=None, Line=Line):
        # Line: the line factory class, injectable for testing or extension.
        self.Line = Line

        if input is None:  # Start empty; lines may be appended later.
            self.source = None
            self.buffer = deque()
        elif hasattr(input, 'readlines'):  # File-like object.
            self.source = list(self.Line(i + 1, j) for i, j in enumerate(input.readlines()))
            self.buffer = deque(self.source)
        else:  # Plain string: split into lines.
            self.source = list(self.Line(i + 1, j) for i, j in enumerate(input.split('\n')))
            self.buffer = deque(self.source)

        super(Lines, self).__init__()

    @property
    def count(self):
        """The number of lines remaining in the buffer."""
        return len(self.buffer)

    def __len__(self):
        return self.count

    def __repr__(self):
        return 'Lines({0.count})'.format(self)

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def __str__(self):
        # NOTE: joining consumes the iterator, draining the buffer.
        return "\n".join(str(i) for i in self)

    def next(self):
        """Pop and return the next buffered line, or raise StopIteration when exhausted."""
        if not self.buffer:
            raise StopIteration()
        return self.buffer.popleft()

    def peek(self):
        """Return the next line without consuming it, or None when the buffer is empty."""
        return self.buffer[0] if self.buffer else None

    def push(self, *lines):
        # Prepend lines preserving their given order; bare strings are wrapped as Line
        # instances numbered from the current head of the buffer.
        self.buffer.extendleft((i if isinstance(i, self.Line) else self.Line(self.buffer[0].number if self.buffer else 0, i)) for i in reversed(lines))

    def reset(self):
        """Restore the buffer to the full original source."""
        self.buffer = deque(self.source)

    def append(self, *lines):
        # Append lines to the tail; bare strings are wrapped and numbered from the current tail.
        self.buffer.extend((i if isinstance(i, self.Line) else self.Line(self.buffer[-1].number if self.buffer else 0, i)) for i in lines)
class Context(object):
    """The processing context for translating cinje source into Python source.

    This is the primary entry point for translation.
    """

    __slots__ = ('input', 'scope', 'flag', '_handler', 'templates', 'handlers', 'mapping')

    def __init__(self, input):
        # input: template source as text, bytes (decoded as UTF-8), or file-like object.
        self.input = Lines(input.decode('utf8') if isinstance(input, bytes) else input)
        self.scope = 0  # Current indentation depth for emitted code.
        self.flag = set()  # Translation state flags (e.g. 'init', 'text', 'buffer').
        self._handler = []  # Instantiated transformers, ordered by priority.
        self.handlers = []  # Registered transformer classes.
        self.templates = []
        self.mapping = None  # Output-to-source line-number mapping (a reversed deque).

        # Discover transformers registered under the 'cinje.translator' entry point.
        for translator in map(methodcaller('load'), iter_entry_points('cinje.translator')):
            self.handlers.append(translator)

    def __repr__(self):
        return "Context({!r}, {}, {})".format(self.input, self.scope, self.flag)

    def prepare(self):
        """Prepare the ordered list of transformers and reset context state to initial."""
        self.scope = 0
        self.mapping = deque([0])
        self._handler = [i() for i in sorted(self.handlers, key=lambda handler: handler.priority)]

    @property
    def stream(self):
        """The workhorse of cinje: transform input lines and emit output lines.

        After constructing an instance with a set of input lines iterate this property to generate the template.
        """
        if 'init' not in self.flag:  # Only the outermost (root) invocation prepares and tracks mapping.
            root = True
            self.prepare()
        else:
            root = False

        # Track which lines were generated in response to which lines of source code.
        # The end result is that there is one entry here for every line emitted, each integer representing the
        # source line number that triggered it. If any lines are returned with missing line numbers, they're
        # inferred from the last entry already in the list.
        # Fun fact: this list is backwards; we optimize by using a deque and appending to the left edge. This
        # updates the head of a linked list; the whole thing needs to be reversed to make sense.
        mapping = self.mapping

        for line in self.input:
            handler = self.classify(line)

            if line.kind == 'code' and line.stripped == 'end':  # Exit the current child scope.
                return

            assert handler, "Unable to identify handler for line; this should be impossible!"

            self.input.push(line)  # Put it back so it can be consumed by the handler.

            for line in handler(self):  # This re-indents the code to match, if missing explicit scope.
                if root: mapping.appendleft(line.number or mapping[0])  # Track source line number.

                if line.scope is None:
                    line = line.clone(scope=self.scope)

                yield line

    def classify(self, line):
        """Identify the correct handler for a given line of input."""
        for handler in self._handler:
            if handler.match(self, line):
                return handler
class Pipe(object):
    """A callable wrapper supporting Unix-style ``value | pipe`` chaining.

    May be used directly as a decorator::

        @Pipe
        def s(text):
            return str(text)

    Calling a Pipe instance produces a fresh copy with replacement preserved
    arguments, enabling usage such as ``"Hello!" | encode('utf8')``.
    """

    __slots__ = ('callable', 'args', 'kwargs')

    def __init__(self, callable, *args, **kw):
        super(Pipe, self).__init__()
        self.callable = callable
        self.args = args or ()
        self.kwargs = kw or {}

    def __repr__(self):
        extra = ''
        if self.args:
            extra += ', ' + ', '.join(repr(arg) for arg in self.args)
        if self.kwargs:
            extra += ', ' + ', '.join('{0}={1!r}'.format(key, val) for key, val in self.kwargs.items())
        return 'Pipe({0!r}{1})'.format(self.callable, extra)

    def __ror__(self, other):
        """Invoke the wrapped callable with the preserved arguments plus *other* appended."""
        return self.callable(*(self.args + (other, )), **self.kwargs)

    def __call__(self, *args, **kw):
        """Return a new Pipe around the same callable with replacement arguments.

        This allows for usage with arguments, as in ``"Hello!" | encode('utf8')``,
        and for building reusable mutated copies::

            utf8 = encode('utf8')
            "Hello!" | utf8
        """
        return self.__class__(self.callable, *args, **kw)
# ## Tag Stripper
class MLStripper(HTMLParser):
    """An ``HTMLParser`` subclass that accumulates only character data, dropping tags."""

    def __init__(self):
        # Parser state is initialized via reset() and direct attribute assignment
        # (rather than HTMLParser.__init__) to work across parser implementations.
        self.reset()
        self.strict = False
        self.convert_charrefs = True  # Decode entity/character references inline.
        self.fed = []  # Collected text fragments, joined on demand.

    def handle_data(self, data):
        self.fed.append(data)

    def get_data(self):
        """Return all text content seen so far as a single string."""
        return ''.join(self.fed)
def strip_tags(html):
    """Return *html* with all markup removed, leaving only the text content."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
from __future__ import unicode_literals
"""Convienent utilities."""
# ## Imports
import sys
from codecs import iterencode
from inspect import isfunction, isclass
from operator import methodcaller
from collections import deque, namedtuple
from collections.abc import Sized, Iterable
from pkg_resources import iter_entry_points
from xml.sax.saxutils import quoteattr
try: # pragma: no cover
from html.parser import HTMLParser
except ImportError: # pragma: no cover
from HTMLParser import HTMLParser
# ## Python Cross-Compatibility
#
# These allow us to detect relevant version differences for code generation, and overcome some of the minor
# differences in labels between Python 2 and Python 3 compatible runtimes.
#
# The differences, in practice, are minor, and are easily overcome through a small block of version-dependant
# code. Handily, even built-in labels are not sacrosanct; they can be easily assigned to and re-mapped.
#
try: # Python 2
from types import StringTypes as stringy
try:
from cStringIO import StringIO
except: # pragma: no cover
from StringIO import StringIO # This never really happens. Still, nice to be defensive.
bytes = str
str = unicode
py = 2
reduce = reduce
except: # Python 3
from io import StringIO
stringy = str
bytes = bytes
str = str
py = 3
# There are some additional complications for the Pypy runtime.
try:
from sys import pypy_version_info
pypy = True
except ImportError:
pypy = False
# ## Type Definitions

# A tuple representing a single step of fancy iteration, as yielded by `iterate()`:
# (first, last, index, total, value).
Iteration = namedtuple('Iteration', ['first', 'last', 'index', 'total', 'value'])
# ## Simple Utility Functions
def stream(input, encoding=None, errors='strict'):
    """Filter falsy chunks out of a template generator, optionally encoding them.

    Used internally by ``cinje.flatten``; the result is directly usable as a
    WSGI response body.
    """
    chunks = (chunk for chunk in input if chunk)  # Drop None (empty wrappers) and empty chunks.

    if not encoding:
        return chunks

    # Encode lazily, chunk by chunk, rather than materializing the stream.
    return iterencode(chunks, encoding, errors=errors)
def flatten(input, file=None, encoding=None, errors='strict'):
    """Collapse a cinje chunk stream into a single string, or write it to *file*.

    With no *file* the joined result is returned: a binary string when an
    *encoding* is given, the native text type otherwise.  With a *file*, chunks
    are written through repeated ``file.write()`` calls and the number of
    characters (or bytes) written is returned.  *errors* is passed through to
    the encoder.

    We can highly recommend using the various streaming IO containers available
    in the [`io`](https://docs.python.org/3/library/io.html) module, though
    [`tempfile`](https://docs.python.org/3/library/tempfile.html) classes are
    also quite useful.
    """
    chunks = stream(input, encoding, errors)

    if file is None:
        # Join in memory; joiner type must match the chunk type.
        return (b'' if encoding else '').join(chunks)

    written = 0
    for piece in chunks:
        file.write(piece)
        written += len(piece)

    return written
def fragment(string, name="anonymous", **context):
    """Translate a template fragment into a callable function.

    **Note:** Use of this function is discouraged everywhere except tests, as no caching is implemented at this
    time. Only one function may be declared, either manually, or automatically. If automatic definition is chosen
    the resulting function takes no arguments. Additional keyword arguments are passed through as global variables.
    """
    if isinstance(string, bytes):
        string = string.decode('utf-8')

    # If the fragment already declares a template function, translate it as-is and recover
    # the generated name afterwards; otherwise wrap the text in a synthetic `: def` block.
    if ": def" in string or ":def" in string:
        code = string.encode('utf8').decode('cinje')  # The 'cinje' codec translates template source to Python.
        name = None
    else:
        code = ": def {name}\n\n{string}".format(
            name = name,
            string = string,
        ).encode('utf8').decode('cinje')

    # Execute the generated module code in a scratch namespace seeded with caller-provided globals.
    environ = dict(context)
    exec(code, environ)

    if name is None:  # We need to dig it out of the `__tmpl__` list.
        if __debug__ and not environ.get('__tmpl__', None):
            raise RuntimeError("Template fragment does not contain a function: " + repr(environ.get('__tmpl__', None)) + \
                    "\n\n" + code)
        return environ[environ['__tmpl__'][-1]]  # Super secret sauce: you _can_ define more than one function...

    return environ[name]
def interruptable(iterable):
    """Yield items from *iterable*, stopping cleanly at the first ``None``.

    Allows easy catching of a generator interrupting operation when using
    ``yield from``.
    """
    for item in iterable:
        if item is None:
            break
        yield item
def iterate(obj):
    """Loop over an iterable and track progress, including first and last state.

    On each iteration yield an Iteration named tuple with the first and last flags, current element index, total
    iterable length (if possible to acquire), and value, in that order.

        for iteration in iterate(something):
            iteration.value  # Do something.

    You can unpack these safely:

        for first, last, index, total, value in iterate(something):
            pass

    If you want to unpack the values you are iterating across, you can by wrapping the nested unpacking in
    parenthesis:

        for first, last, index, total, (foo, bar, baz) in iterate(something):
            pass

    Even if the length of the iterable can't be reliably determined this function will still capture the "last"
    state of the final loop iteration. (Basically: this works with generators.)

    This process is about 10x slower than simple enumeration on CPython 3.4, so only use it where you actually
    need to track state. Use `enumerate()` elsewhere.
    """
    # NOTE(review): `global` plus self-assignment copies the `next` builtin and the
    # `Iteration` global into module globals — presumably a lookup-speed hack; confirm
    # before simplifying, as removing it changes module-level state.
    global next, Iteration
    next = next
    Iteration = Iteration

    total = len(obj) if isinstance(obj, Sized) else None  # Length is only known for sized containers.
    iterator = iter(obj)
    first = True
    last = False
    i = 0

    try:
        value = next(iterator)
    except StopIteration:  # Empty iterable: yield nothing at all.
        return

    while True:
        try:
            next_value = next(iterator)  # Look ahead one element to detect the final iteration.
        except StopIteration:
            last = True

        yield Iteration(first, last, i, total, value)
        if last: return

        value = next_value
        i += 1
        first = False
def xmlargs(_source=None, **values):
    """Render keyword arguments as a string of XML/HTML attributes.

    Key mangling: trailing underscores are stripped (``class_`` → ``class``), double
    underscores become colons and single underscores become hyphens.  Keys starting
    with an underscore are skipped, as are falsy values other than zero; ``True``
    renders as a bare (value-less) attribute; non-string iterables are joined with
    spaces.  Entries in *_source*, when given, override the keyword defaults.
    """
    from cinje.helpers import bless  # Imported here, presumably to avoid a circular import — confirm.

    # Optimize by binding these names to the local scope, saving a lookup on each call.
    # NOTE(review): `global` + self-assignment promotes builtin/module lookups to module
    # globals; appears to be a speed hack — confirm before simplifying.
    global str, Iterable, stringy
    str = str
    Iterable = Iterable
    stringy = stringy

    ejoin = " ".join
    parts = []
    pappend = parts.append

    # If a data source is provided it overrides the keyword arguments which are treated as defaults.
    if _source:
        values.update(_source)

    for k in sorted(values):
        # We technically allow non-string values for keys. They're just converted to strings first.
        key = str(k).rstrip('_').replace('__', ':').replace('_', '-')
        value = values[k]

        # We skip empty, None, False, and other falsy values other than zero.
        if k[0] == '_' or (not value and (value is False or value != 0)):  # False == 0, so, uh, work around that.
            continue

        if value is True:  # For explicitly True values, we don't have a value for the attribute.
            pappend(key)
            continue

        # Non-string iterables (such as lists, sets, tuples, etc.) are treated as space-separated strings.
        if isinstance(value, Iterable) and not isinstance(value, stringy):
            value = ejoin(str(i) for i in value)

        pappend(key + "=" + quoteattr(str(value)))  # quoteattr handles quoting and escaping.

    return bless(" " + ejoin(parts)) if parts else ''
def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
    """Chunkify and "tag" a block of text into plain text and code sections.

    The ``None`` key in *mapping* represents plain text sections; the two-character
    prefixes select the substitution kind.  Chunks are yielded as clones of *line*
    whose ``kind`` is the tag and whose ``line`` is the matched text.  (The shared
    default *mapping* is read-only here, so the mutable default is safe.)
    """
    skipping = 0  # How many closing braces will we need to skip (current nesting depth)?
    start = None  # Starting position of the current `x{...}` match; None while in plain text.
    last = 0  # Index just past the end of the last chunk emitted.
    i = 0
    text = line.line

    while i < len(text):
        if start is not None:  # Inside an `x{...}` expression: scan for its matching closing brace.
            if text[i] == '{':
                skipping += 1
            elif text[i] == '}':
                if skipping:
                    skipping -= 1
                else:
                    # The two characters immediately before `start` identify the chunk kind.
                    yield line.clone(kind=mapping[text[start-2:start]], line=text[start:i])
                    start = None
                    last = i = i + 1
                    continue
        elif text[i:i+2] in mapping:  # A new substitution opens here.
            if last is not None and last != i:  # Flush the preceding plain-text run first.
                yield line.clone(kind=mapping[None], line=text[last:i])
            last = None
            start = i = i + 2
            continue
        i += 1

    # Trailing plain text after the final substitution, if any.
    # NOTE(review): an unterminated substitution leaves last=None here, making this
    # comparison raise on Python 3 — confirm inputs are always brace-balanced.
    if last < len(text):
        yield line.clone(kind=mapping[None], line=text[last:])
def ensure_buffer(context, separate=True):
    """Emit the generated lines that initialize the text buffer, at most once.

    Produces nothing unless buffering is enabled ('buffer' flag present) and the
    buffer has not already been emitted ('text' flag absent).  When *separate*
    is true a leading blank line is emitted first.
    """
    if 'text' in context.flag or 'buffer' not in context.flag:
        return

    prologue = [Line(0, "")] if separate else []
    prologue.append(Line(0, "_buffer = []"))

    if not pypy:  # On CPython, pre-bind the buffer methods for faster writes.
        prologue.append(Line(0, "__w, __ws = _buffer.extend, _buffer.append"))

    prologue.append(Line(0, ""))

    for emitted in prologue:
        yield emitted

    context.flag.add('text')
# ## Common Classes
class Line(object):
    """A rich description for a line of input, allowing for annotation."""

    __slots__ = ('number', 'line', 'scope', 'kind', 'continued')

    def __init__(self, number, line, scope=None, kind=None):
        # number: 1-based source line number (0 for synthesized lines).
        # line: the text content; bytes are decoded as UTF-8.
        # scope: indentation depth, or None when not yet assigned.
        # kind: classification tag ('text', 'code', 'comment', ...); inferred when omitted.
        if isinstance(line, bytes):
            line = line.decode('utf-8')

        self.number = number
        self.line = line
        self.scope = scope
        self.kind = kind
        self.continued = self.stripped.endswith('\\')  # Explicit line-continuation marker.

        if not kind: self.process()  # Classify only when the caller didn't supply a kind.

        super(Line, self).__init__()

    def process(self):
        """Infer this line's kind from its leading characters, normalizing code lines."""
        if self.stripped.startswith('#') and not self.stripped.startswith('#{'):  # `#{` is substitution, not comment.
            self.kind = 'comment'
        elif self.stripped.startswith(':'):
            self.kind = 'code'
            self.line = self.stripped[1:].lstrip()  # Drop the `:` prefix from code lines.
        else:
            self.kind = 'text'

    @property
    def stripped(self):
        """The line text with surrounding whitespace removed."""
        return self.line.strip()

    @property
    def partitioned(self):
        """The line split into (first word, remainder) on the first space."""
        prefix, _, remainder = self.stripped.partition(' ')
        return prefix.rstrip(), remainder.lstrip()

    def __repr__(self):
        return '{0.__class__.__name__}({0.number}, {0.kind}, "{0.stripped}")'.format(self)

    def __bytes__(self):
        return str(self).encode('utf8')

    def __str__(self):
        # Re-indent with tabs according to scope, when a scope has been assigned.
        if self.scope is None:
            return self.line
        return '\t' * self.scope + self.line.lstrip()

    if py == 2:  # pragma: no cover
        # On Python 2 the text/bytes dunders are swapped to match its native str semantics.
        __unicode__ = __str__
        __str__ = __bytes__
        del __bytes__

    def clone(self, **kw):
        """Return a copy of this line with selected attributes overridden via keywords."""
        values = dict(
            number = self.number,
            line = self.line,
            scope = self.scope,
            kind = self.kind,
        )
        values.update(kw)
        instance = self.__class__(**values)
        return instance
class Lines(object):
    """Iterate input lines of source, with the ability to push lines back."""

    __slots__ = ['Line', 'source', 'buffer']

    def __init__(self, input=None, Line=Line):
        # ``Line`` is injectable so subclasses/tests can substitute their own
        # line type.
        self.Line = Line
        if input is None:
            self.source = None
            self.buffer = deque()
        elif hasattr(input, 'readlines'):
            # File-like input: number lines from 1.
            self.source = list(self.Line(i + 1, j) for i, j in enumerate(input.readlines()))
            self.buffer = deque(self.source)
        else:
            # String input: split on newlines and number from 1.
            self.source = list(self.Line(i + 1, j) for i, j in enumerate(input.split('\n')))
            self.buffer = deque(self.source)
        super(Lines, self).__init__()

    @property
    def count(self):
        # Number of lines not yet consumed.
        return len(self.buffer)

    def __len__(self):
        return self.count

    def __repr__(self):
        return 'Lines({0.count})'.format(self)

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def __str__(self):
        # NOTE: consumes the iterator — for debugging snapshots only.
        return "\n".join(str(i) for i in self)

    def next(self):
        if not self.buffer:
            raise StopIteration()
        return self.buffer.popleft()

    def peek(self):
        # Next line without consuming it, or None when exhausted.
        return self.buffer[0] if self.buffer else None

    def push(self, *lines):
        # Prepend lines (in the given order); bare strings are wrapped in Line,
        # inheriting the next pending line's number (0 when empty).
        self.buffer.extendleft((i if isinstance(i, self.Line) else self.Line(self.buffer[0].number if self.buffer else 0, i)) for i in reversed(lines))

    def reset(self):
        # Restart iteration from the originally captured source.
        self.buffer = deque(self.source)

    def append(self, *lines):
        # Append lines at the end; bare strings inherit the last line's number.
        self.buffer.extend((i if isinstance(i, self.Line) else self.Line(self.buffer[-1].number if self.buffer else 0, i)) for i in lines)
class Context(object):
    """The processing context for translating cinje source into Python source.
    This is the primary entry point for translation.
    """

    __slots__ = ('input', 'scope', 'flag', '_handler', 'templates', 'handlers', 'mapping')

    def __init__(self, input):
        # Accept bytes or text; Lines handles file-like objects and strings.
        self.input = Lines(input.decode('utf8') if isinstance(input, bytes) else input)
        self.scope = 0  # Current indentation depth of emitted code.
        self.flag = set()  # Arbitrary state flags shared with handlers.
        self._handler = []  # Instantiated handlers, ordered by priority.
        self.templates = []
        self.handlers = []  # Handler classes discovered via entry points.
        self.mapping = None  # Output-line -> source-line map (built in prepare).
        # Discover translator plugins registered under 'cinje.translator'.
        for translator in map(methodcaller('load'), iter_entry_points('cinje.translator')):
            self.handlers.append(translator)

    def __repr__(self):
        return "Context({!r}, {}, {})".format(self.input, self.scope, self.flag)

    def prepare(self):
        """Prepare the ordered list of transformers and reset context state to initial."""
        self.scope = 0
        self.mapping = deque([0])
        self._handler = [i() for i in sorted(self.handlers, key=lambda handler: handler.priority)]

    @property
    def stream(self):
        """The workhorse of cinje: transform input lines and emit output lines.
        After constructing an instance with a set of input lines iterate this property to generate the template.
        """
        # Only the outermost (root) invocation prepares state and tracks the
        # line-number mapping; nested invocations reuse the root's state.
        if 'init' not in self.flag:
            root = True
            self.prepare()
        else:
            root = False
        # Track which lines were generated in response to which lines of source code.
        # The end result is that there is one entry here for every line emitted, each integer representing the source
        # line number that triggered it. If any lines are returned with missing line numbers, they're inferred from
        # the last entry already in the list.
        # Fun fact: this list is backwards; we optimize by using a deque and appending to the left edge. this updates
        # the head of a linked list; the whole thing needs to be reversed to make sense.
        mapping = self.mapping
        for line in self.input:
            handler = self.classify(line)
            if line.kind == 'code' and line.stripped == 'end':  # Exit the current child scope.
                return
            assert handler, "Unable to identify handler for line; this should be impossible!"
            self.input.push(line)  # Put it back so it can be consumed by the handler.
            for line in handler(self):  # This re-indents the code to match, if missing explicit scope.
                if root: mapping.appendleft(line.number or mapping[0])  # Track source line number.
                if line.scope is None:
                    line = line.clone(scope=self.scope)
                yield line

    def classify(self, line):
        """Identify the correct handler for a given line of input."""
        # First match wins; _handler is already sorted by priority.
        for handler in self._handler:
            if handler.match(self, line):
                return handler
class Pipe(object):
    """An object representing a pipe-able callable, optionally with preserved arguments.

    Construct custom subclasses (define a method named "callable") or use it as
    a decorator:

        @Pipe
        def s(text):
            return str(text)
    """

    __slots__ = ('callable', 'args', 'kwargs')

    def __init__(self, callable, *args, **kw):
        super(Pipe, self).__init__()
        self.callable = callable
        # Normalise falsy containers to fresh empty ones.
        self.args = args or ()
        self.kwargs = kw or {}

    def __repr__(self):
        extra = ''
        if self.args:
            extra += ', ' + ', '.join(repr(value) for value in self.args)
        if self.kwargs:
            extra += ', ' + ', '.join('{0}={1!r}'.format(name, value) for name, value in self.kwargs.items())
        return 'Pipe({!r}{})'.format(self.callable, extra)

    def __ror__(self, other):
        """The main machinery of the Pipe: preserved arguments first, piped value last."""
        positional = self.args + (other,)
        return self.callable(*positional, **self.kwargs)

    def __call__(self, *args, **kw):
        """Return a mutated copy of this Pipe bound to new arguments.

        Allows usage with arguments, as in:

            "Hello!" | encode('utf8')

        and easy construction of reusable specialisations:

            utf8 = encode('utf8')
            "Hello!" | utf8
        """
        return self.__class__(self.callable, *args, **kw)
# ## Tag Stripper
class MLStripper(HTMLParser):
    """HTML parser that discards markup and keeps only the document text."""

    def __init__(self):
        # BUG FIX: initialise the base parser properly instead of only calling
        # self.reset(); written as an explicit unbound call because the py2
        # HTMLParser this module still supports is an old-style class.
        HTMLParser.__init__(self)
        self.strict = False  # ignored on py3; kept for the py2 HTMLParser
        self.convert_charrefs = True  # decode character references into text
        self.fed = []  # accumulated text fragments

    def handle_data(self, d):
        # Called by HTMLParser for every run of character data between tags.
        self.fed.append(d)

    def get_data(self):
        """Return the stripped text as a single string."""
        return ''.join(self.fed)
def strip_tags(html):
    """Return *html* with all markup removed, leaving only the text content.

    (Repaired: the original final line had dataset-dump residue fused onto it.)
    """
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
import hashlib
import base64
from glados.models import TinyURL
from elasticsearch_dsl import Search
from django.http import JsonResponse
from datetime import datetime, timedelta, timezone
from glados.usage_statistics import glados_server_statistics
from glados.models import ESTinyURLUsageRecord
from glados.es_connection import DATA_CONNECTION, MONITORING_CONNECTION
DAYS_TO_LIVE = 7
def process_shorten_url_request(request):
    """View: shorten the POSTed 'long_url' and reply with its hash and expiry."""
    if request.method != "POST":
        return JsonResponse({'error': 'this is only available via POST'})
    long_url = request.POST.get('long_url', '')
    short_url, expiration_str = shorten_url(long_url)
    payload = {
        'hash': short_url,
        'expires': expiration_str
    }
    return JsonResponse(payload)
def process_extend_url_request(request, hash):
    """View: resolve a previously shortened *hash* back to its original URL."""
    long_url, expiration_str = get_original_url(hash)
    payload = {
        'long_url': long_url,
        'expiration_date': expiration_str
    }
    return JsonResponse(payload)
# given a long url, it shortens it and saves it in elastic, it returns the hash obtained
def shorten_url(long_url):
    """Create (or look up) the short hash for *long_url*.

    Returns (url_hash, expiration ISO-8601 string or 'Never').
    """
    # md5 is only a bucketing key here, not a security measure.
    hex_digest = hashlib.md5(long_url.encode('utf-8')).digest()
    # replace / and + to avoid routing problems
    url_hash = base64.b64encode(hex_digest).decode('utf-8').replace('/', '_').replace('+', '-')
    # save this in elastic if it doesn't exist
    s = Search(index='chembl_glados_tiny_url')\
        .extra(track_total_hits=True).using(DATA_CONNECTION).filter('query_string', query='"' + url_hash + '"')
    response = s.execute()
    if response.hits.total.value == 0:
        # BUG FIX: use an aware UTC datetime so the ISO string below really is
        # UTC. The old code took local-naive now() and merely stamped it with a
        # UTC tzinfo label, skewing the reported expiry by the UTC offset.
        expiration_date = datetime.now(timezone.utc) + timedelta(days=DAYS_TO_LIVE)
        expires = expiration_date.timestamp() * 1000  # epoch milliseconds
        tinyURL = TinyURL(long_url=long_url, hash=url_hash, expires=expires)
        tinyURL.indexing()
        expiration_date_str = expiration_date.isoformat()
    else:
        try:
            expires = response.hits[0].expires
            # fromtimestamp(tz=...) replaces the deprecated utcfromtimestamp
            # and yields an aware datetime directly.
            expiration_date = datetime.fromtimestamp(expires / 1000, tz=timezone.utc)
            expiration_date_str = expiration_date.isoformat()
        except AttributeError:
            # a stored document without an 'expires' field never expires
            expiration_date_str = 'Never'
    glados_server_statistics.record_tiny_url_usage(ESTinyURLUsageRecord.URL_SHORTENED)
    return url_hash, expiration_date_str
def get_original_url(url_hash):
    """Resolve *url_hash* to its stored long URL.

    Returns (long_url, expiry ISO string) — the expiry is None for permanent
    links — or (None, None) when the hash is unknown or expired.
    """
    # look here in elastic
    s = Search(index='chembl_glados_tiny_url')\
        .extra(track_total_hits=True).using(DATA_CONNECTION).filter('query_string', query='"' + url_hash + '"')
    response = s.execute(ignore_cache=True)
    if response.hits.total.value == 0:
        return None, None
    try:
        expires = response.hits[0].expires
        # BUG FIX: compare aware UTC datetimes. The old code compared a naive
        # UTC value (utcfromtimestamp) against naive *local* now(), so expiry
        # was off by the server's UTC offset.
        expiration_date = datetime.fromtimestamp(expires / 1000, tz=timezone.utc)
        if datetime.now(timezone.utc) > expiration_date:
            return None, None
        url = response.hits[0].long_url
        glados_server_statistics.record_tiny_url_usage(ESTinyURLUsageRecord.URL_EXPANDED)
        return url, expiration_date.isoformat()
    except AttributeError:
        # no expiration time means that it never expires
        url = response.hits[0].long_url
        glados_server_statistics.record_tiny_url_usage(ESTinyURLUsageRecord.URL_EXPANDED)
        return url, None

import hashlib
import base64
from glados.models import TinyURL
from elasticsearch_dsl import Search
from django.http import JsonResponse
from datetime import datetime, timedelta, timezone
from glados.usage_statistics import glados_server_statistics
from glados.models import ESTinyURLUsageRecord
from glados.es_connection import DATA_CONNECTION, MONITORING_CONNECTION
DAYS_TO_LIVE = 7
def process_shorten_url_request(request):
    """View: shorten the POSTed 'long_url' and reply with its hash and expiry."""
    if request.method == "POST":
        # Missing parameter falls back to '' rather than erroring.
        long_url = request.POST.get('long_url', '')
        short_url, expiration_str = shorten_url(long_url)
        resp_data = {
            'hash': short_url,
            'expires': expiration_str
        }
        return JsonResponse(resp_data)
    else:
        return JsonResponse({'error': 'this is only available via POST'})
def process_extend_url_request(request, hash):
    """View: resolve a previously shortened *hash* back to its original URL."""
    # Both values are None when the hash is unknown or expired.
    long_url, expiration_str = get_original_url(hash)
    resp_data = {
        'long_url': long_url,
        'expiration_date': expiration_str
    }
    return JsonResponse(resp_data)
# given a long url, it shortens it and saves it in elastic, it returns the hash obtained
def shorten_url(long_url):
    """Return (url_hash, expiration ISO string or 'Never') for *long_url*."""
    # md5 is only a bucketing key here, not a security measure.
    hex_digest = hashlib.md5(long_url.encode('utf-8')).digest()
    # replace / and + to avoid routing problems
    url_hash = base64.b64encode(hex_digest).decode('utf-8').replace('/', '_').replace('+', '-')
    # save this in elastic if it doesn't exist
    s = Search(index='chembl_glados_tiny_url')\
        .extra(track_total_hits=True).using(DATA_CONNECTION).filter('query_string', query='"' + url_hash + '"')
    response = s.execute()
    if response.hits.total.value == 0:
        dt = datetime.now()
        td = timedelta(days=DAYS_TO_LIVE)
        expiration_date = dt + td
        expires = expiration_date.timestamp() * 1000  # epoch milliseconds
        tinyURL = TinyURL(long_url=long_url, hash=url_hash, expires=expires)
        tinyURL.indexing()
        # NOTE(review): expiration_date is local-naive; stamping it with a UTC
        # tzinfo below skews the reported ISO string by the UTC offset.
        expiration_date_str = expiration_date.replace(tzinfo=timezone.utc).isoformat()
    else:
        try:
            expires = response.hits[0].expires
            expiration_date = datetime.utcfromtimestamp(expires / 1000)
            expiration_date_str = expiration_date.replace(tzinfo=timezone.utc).isoformat()
        except AttributeError:
            # a stored document without an 'expires' field never expires
            expiration_date_str = 'Never'
    glados_server_statistics.record_tiny_url_usage(ESTinyURLUsageRecord.URL_SHORTENED)
    return url_hash, expiration_date_str
def get_original_url(url_hash):
    """Return (long_url, expiry ISO string) for *url_hash*, or (None, None)."""
    # look here in elastic
    s = Search(index='chembl_glados_tiny_url')\
        .extra(track_total_hits=True).using(DATA_CONNECTION).filter('query_string', query='"' + url_hash + '"')
    response = s.execute(ignore_cache=True)
    if response.hits.total.value == 0:
        return None, None
    try:
        expires = response.hits[0].expires
        expiration_date = datetime.utcfromtimestamp(expires / 1000)
        # NOTE(review): naive-UTC value compared against naive *local* now();
        # expiry is off by the server's UTC offset — confirm and fix.
        now = datetime.now()
        expired = now > expiration_date
        if expired:
            return None, None
        else:
            url = response.hits[0].long_url
            glados_server_statistics.record_tiny_url_usage(ESTinyURLUsageRecord.URL_EXPANDED)
            expiration_date_str = expiration_date.replace(tzinfo=timezone.utc).isoformat()
            return url, expiration_date_str
    except AttributeError:
        # no expiration time means that it never expires
        url = response.hits[0].long_url
        glados_server_statistics.record_tiny_url_usage(ESTinyURLUsageRecord.URL_EXPANDED)
        return url, None
from __future__ import absolute_import, print_function, division
from uuid import uuid4
from random import Random
from itertools import product
from .hive import Hive
from .flower import Flower
from .game_state import rngs, GameState
from hiveminder._util import even_q_in_range as in_range
class SynchronousExecutor(object):
    """Drop-in stand-in for a futures Executor that runs work inline."""

    def __init__(self, max_workers=None):
        # max_workers is accepted (and ignored) to mirror the Executor API.
        pass

    def map(self, func, *iterables, **kwargs):
        """Lazily apply *func* across *iterables*; extra kwargs are ignored."""
        return map(func, *iterables)
def make_hives(n, board_width, board_height, rng):
    """Place *n* Hives on distinct interior tiles chosen by *rng*."""
    candidate_sites = list(product(range(1, board_width - 1), range(1, board_height - 1)))
    chosen = rng.sample(candidate_sites, n)
    return tuple(Hive(x, y) for x, y in chosen)
def make_flowers(n, hives, board_width, board_height, rng, game_params):
    """Place *n* Flowers on interior tiles that bees can reach from the hives
    but that are not adjacent to (or on) a hive.
    """
    all_tiles = set(product(range(1, board_width - 1), range(1, board_height - 1)))
    reachable_tiles = set()
    occupied_tiles = set()
    # A bee can fly at most half its starting energy outbound and still return.
    radius = min(game_params.initial_energy // 2, max(board_width, board_height))
    for hive in hives:
        occupied_tiles = occupied_tiles | set(in_range(hive.x, hive.y, 1))
        reachable_tiles = reachable_tiles | set(in_range(hive.x, hive.y, radius))
    # BUG FIX: random.sample() rejects sets (deprecated in 3.9, TypeError from
    # 3.11); sort the candidates so a seeded rng stays deterministic.
    all_possible_sites = sorted(all_tiles & (reachable_tiles - occupied_tiles))
    return tuple(Flower(*site, game_params=game_params, expires=game_params.flower_lifespan)
                 for site in rng.sample(all_possible_sites, n))
def initialise_game(game_params, boards, board_width, board_height, num_hives, num_flowers, game_length, seed=None):
    """Build a fresh GameState with randomly placed hives and flowers per board."""
    game_id = str(uuid4())
    rng = Random(seed)
    # Keep the rng addressable by game id for later turns.
    rngs[game_id] = rng
    hives = [make_hives(num_hives, board_width, board_height, rng)
             for _ in range(boards)]
    flowers = [make_flowers(num_flowers, board_hives, board_width, board_height, rng, game_params)
               for board_hives in hives]
    return GameState(game_params=game_params,
                     game_id=game_id,
                     boards=boards,
                     board_width=board_width,
                     board_height=board_height,
                     hives=hives,
                     flowers=flowers,
                     game_length=game_length)
def game(board_width, board_height, num_hives, num_flowers, game_length, algos, game_params, pool=SynchronousExecutor()):
    """Run one complete game and return the final GameState.

    *algos* maps display name -> algo object (``fn`` plus optional lifecycle
    hooks); *pool* only needs an Executor-style ``map``.
    NOTE(review): the default pool is a mutable-default instance shared across
    calls — harmless for this stateless executor, but worth confirming.
    """
    algo_names = list(algos.keys())
    algos = list(algos.values())
    game_state = initialise_game(game_params,
                                 len(algos),
                                 board_width=board_width,
                                 board_height=board_height,
                                 num_hives=num_hives,
                                 num_flowers=num_flowers,
                                 game_length=game_length)
    def prepare_algo(i, algo, board):
        # Give each algo its board's starting state via the optional hook.
        if algo.on_start_game is not None:
            board_json = board.to_json()
            algo.on_start_game(board_json['boardWidth'],
                               board_json['boardHeight'],
                               board_json['hives'],
                               board_json['flowers'],
                               len(algos),
                               i,
                               game_state.game_id,
                               game_params)
    list(pool.map(prepare_algo, range(len(algos)), algos, game_state.boards))
    # Per-board event accumulators, rebound wholesale by game_state.turn().
    # NOTE(review): [x] * n aliases one shared dict across boards; safe only
    # because these are read-only until turn() replaces them.
    crashed = [dict(headon={}, collided={}, exhausted={}, gobbled={}, seeds={})] * len(algos)
    landed_bees = [{}] * len(algos)
    lost_volants = [{}] * len(algos)
    received_volants = [{}] * len(algos)
    while not game_state.game_over():
        assert len(algos) == len(game_state.boards)
        assert len(algos) == len(crashed)
        gamestate_json = game_state.to_json()
        scores = [board['score'] for board in gamestate_json['boards']]
        print(dict(zip(algo_names, scores)))
        def call_algo(i, algo):
            # One move request: the algo sees its own board plus last turn's events.
            return algo.fn(gamestate_json['boards'][i]['boardWidth'],
                           gamestate_json['boards'][i]['boardHeight'],
                           gamestate_json['boards'][i]['hives'],
                           gamestate_json['boards'][i]['flowers'],
                           gamestate_json['boards'][i]['inflight'],
                           {classification: {crashed_id: crashed_item.to_json()
                                             for crashed_id, crashed_item in crashes.items()}
                            for classification, crashes in crashed[i].items()},
                           {volant_id: volant.to_json() for volant_id, volant in lost_volants[i].items()},
                           {volant_id: volant.to_json() for volant_id, volant in received_volants[i].items()},
                           {bee_id: bee.to_json() for bee_id, bee in landed_bees[i].items()},
                           scores,
                           i,
                           gamestate_json['gameId'],
                           game_state.turn_num)
        commands = list(pool.map(call_algo, *zip(*enumerate(algos))))
        game_state, crashed, landed_bees, lost_volants, received_volants = game_state.turn(commands)
    # NOTE(review): these scores come from the snapshot taken *before* the
    # final turn() — confirm the final-turn scores should not be used instead.
    scores = [board['score'] for board in gamestate_json['boards']]
    print_game_result(game_state, algo_names)
    def farewell_algo(i, algo, board):
        # Deliver the final state to each algo via the optional hook.
        if algo.on_game_over is not None:
            board_json = board.to_json()
            algo.on_game_over(board_json['boardWidth'],
                              board_json['boardHeight'],
                              board_json['hives'],
                              board_json['flowers'],
                              board_json['inflight'],
                              {classification: {crashed_id: crashed_item.to_json()
                                                for crashed_id, crashed_item in crashes.items()}
                               for classification, crashes in crashed[i].items()},
                              {volant_id: volant.to_json() for volant_id, volant in lost_volants[i].items()},
                              {volant_id: volant.to_json() for volant_id, volant in received_volants[i].items()},
                              {bee_id: bee.to_json() for bee_id, bee in landed_bees[i].items()},
                              scores,
                              i,
                              game_state.game_id,
                              game_state.turn_num)
    list(pool.map(farewell_algo, range(len(algos)), algos, game_state.boards))
    return game_state
def print_game_result(game_state, algo_names):
    """Announce the winner(s) and dump the score tables."""
    winners = calculate_winner(game_state)
    if len(winners) > 1:
        print_multiple_winners(game_state, winners, algo_names)
    else:
        print_single_winner(game_state, winners[0], algo_names)
def calculate_winner(game_state):
    """Return the indices of the board(s) holding the highest score."""
    scores = [board.calculate_score() for board in game_state.boards]
    best = max(scores)
    return [idx for idx, value in enumerate(scores) if value == best]
def print_single_winner(game_state, winner, algo_names):
    """Report an outright winner, then the score breakdown."""
    winner_name = algo_names[winner]
    print('Congratulations to {}! You have won!'.format(winner_name))
    print_scores(game_state, algo_names)
    print_stats_table(game_state, algo_names)
def print_multiple_winners(game_state, winners, algo_names):
    """Report a tie between the given boards, then the score breakdown."""
    tied = ' and '.join([algo_names[i] for i in winners])
    print('We have a tie between {}!'.format(tied))
    print_scores(game_state, algo_names)
    print_stats_table(game_state, algo_names)
def print_scores(game_state, algo_names):
    """Print one padded 'name: score' line per player."""
    print('Final scores were:')
    for idx, player in enumerate(algo_names):
        score = game_state.boards[idx].calculate_score()
        print('\t{}:\t{}'.format(player.ljust(20, ' '), score))
def print_stats_table(gamestate_json, algo_names):
    """Print a per-algo breakdown table of board summary statistics.

    NOTE(review): despite its name, *gamestate_json* is used as a GameState
    object (``.boards[i].summary()``), not a JSON dict — confirm with callers.
    (Repaired: the original final line had dataset-dump residue fused onto it.)
    """
    print('Breakdown of scores as follows:\n')
    summary_tables = {algo_name: gamestate_json.boards[i].summary()
                      for i, algo_name in enumerate(algo_names)}
    # All boards share the same summary keys; take them from the first table.
    keys = list(summary_tables.values())[0].keys()
    max_key_length = max(len(k) for k in keys)
    col_width = 15
    print('Algo Name'.ljust(max_key_length + 5, ' ') + '| ' +
          ' |'.join([algo_name.rjust(col_width, ' ')
                     for algo_name in summary_tables.keys()]))
    print('-' * (max_key_length + 7 + (col_width + 2) * len(algo_names)))
    for key in keys:
        print(key.ljust(max_key_length + 5) + '| ' +
              ' |'.join([str(table[key]).rjust(col_width, ' ')
                         for table in summary_tables.values()]))
from uuid import uuid4
from random import Random
from itertools import product
from .hive import Hive
from .flower import Flower
from .game_state import rngs, GameState
from hiveminder._util import even_q_in_range as in_range
class SynchronousExecutor(object):
    """Drop-in stand-in for a futures Executor that runs work inline."""

    def __init__(self, max_workers=None):
        # max_workers is accepted (and ignored) to mirror the Executor API.
        pass

    def map(self, func, *iterables, **kwargs):
        # Lazily apply func across the iterables; extra kwargs are ignored.
        return map(func, *iterables)
def make_hives(n, board_width, board_height, rng):
    """Place *n* Hives on distinct interior tiles chosen by *rng*."""
    all_possible_sites = list(product(range(1, board_width - 1), range(1, board_height - 1)))
    return tuple(Hive(*site) for site in rng.sample(all_possible_sites, n))
def make_flowers(n, hives, board_width, board_height, rng, game_params):
    """Place *n* Flowers on tiles reachable from the hives but not adjacent to them."""
    all_tiles = set(product(range(1, board_width - 1), range(1, board_height - 1)))
    reachable_tiles = set()
    occupied_tiles = set()
    # A bee can fly at most half its starting energy outbound and still return.
    radius = min(game_params.initial_energy // 2, max(board_width, board_height))
    for hive in hives:
        occupied_tiles = occupied_tiles | set(in_range(hive.x, hive.y, 1))
        reachable_tiles = reachable_tiles | set(in_range(hive.x, hive.y, radius))
    all_possible_sites = all_tiles & (reachable_tiles - occupied_tiles)
    # NOTE(review): random.sample() on a set is deprecated since 3.9 and a
    # TypeError from 3.11 — this needs a sorted list for determinism.
    return tuple(Flower(*site, game_params=game_params, expires=game_params.flower_lifespan)
                 for site in rng.sample(all_possible_sites, n))
def initialise_game(game_params, boards, board_width, board_height, num_hives, num_flowers, game_length, seed=None):
    """Build a fresh GameState with randomly placed hives and flowers per board."""
    game_id = str(uuid4())
    rng = Random(seed)
    # Keep the rng addressable by game id for later turns.
    rngs[game_id] = rng
    hives = [make_hives(num_hives,
                        board_width,
                        board_height,
                        rng) for _ in range(boards)]
    flowers = [make_flowers(num_flowers,
                            board_hives,
                            board_width,
                            board_height,
                            rng, game_params) for board_hives in hives]
    return GameState(game_params=game_params,
                     game_id=game_id,
                     boards=boards,
                     board_width=board_width,
                     board_height=board_height,
                     hives=hives,
                     flowers=flowers,
                     game_length=game_length)
def game(board_width, board_height, num_hives, num_flowers, game_length, algos, game_params, pool=SynchronousExecutor()):
    """Run one complete game and return the final GameState.

    *algos* maps display name -> algo object (``fn`` plus optional lifecycle
    hooks); *pool* only needs an Executor-style ``map``.
    NOTE(review): the default pool is a mutable-default instance shared across
    calls — harmless for this stateless executor, but worth confirming.
    """
    algo_names = list(algos.keys())
    algos = list(algos.values())
    game_state = initialise_game(game_params,
                                 len(algos),
                                 board_width=board_width,
                                 board_height=board_height,
                                 num_hives=num_hives,
                                 num_flowers=num_flowers,
                                 game_length=game_length)
    def prepare_algo(i, algo, board):
        # Give each algo its board's starting state via the optional hook.
        if algo.on_start_game is not None:
            board_json = board.to_json()
            algo.on_start_game(board_json['boardWidth'],
                               board_json['boardHeight'],
                               board_json['hives'],
                               board_json['flowers'],
                               len(algos),
                               i,
                               game_state.game_id,
                               game_params)
    list(pool.map(prepare_algo, range(len(algos)), algos, game_state.boards))
    # Per-board event accumulators, rebound wholesale by game_state.turn().
    # NOTE(review): [x] * n aliases one shared dict across boards; safe only
    # because these are read-only until turn() replaces them.
    crashed = [dict(headon={}, collided={}, exhausted={}, gobbled={}, seeds={})] * len(algos)
    landed_bees = [{}] * len(algos)
    lost_volants = [{}] * len(algos)
    received_volants = [{}] * len(algos)
    while not game_state.game_over():
        assert len(algos) == len(game_state.boards)
        assert len(algos) == len(crashed)
        gamestate_json = game_state.to_json()
        scores = [board['score'] for board in gamestate_json['boards']]
        print(dict(zip(algo_names, scores)))
        def call_algo(i, algo):
            # One move request: the algo sees its own board plus last turn's events.
            return algo.fn(gamestate_json['boards'][i]['boardWidth'],
                           gamestate_json['boards'][i]['boardHeight'],
                           gamestate_json['boards'][i]['hives'],
                           gamestate_json['boards'][i]['flowers'],
                           gamestate_json['boards'][i]['inflight'],
                           {classification: {crashed_id: crashed_item.to_json()
                                             for crashed_id, crashed_item in crashes.items()}
                            for classification, crashes in crashed[i].items()},
                           {volant_id: volant.to_json() for volant_id, volant in lost_volants[i].items()},
                           {volant_id: volant.to_json() for volant_id, volant in received_volants[i].items()},
                           {bee_id: bee.to_json() for bee_id, bee in landed_bees[i].items()},
                           scores,
                           i,
                           gamestate_json['gameId'],
                           game_state.turn_num)
        commands = list(pool.map(call_algo, *zip(*enumerate(algos))))
        game_state, crashed, landed_bees, lost_volants, received_volants = game_state.turn(commands)
    # NOTE(review): these scores come from the snapshot taken *before* the
    # final turn() — confirm the final-turn scores should not be used instead.
    scores = [board['score'] for board in gamestate_json['boards']]
    print_game_result(game_state, algo_names)
    def farewell_algo(i, algo, board):
        # Deliver the final state to each algo via the optional hook.
        if algo.on_game_over is not None:
            board_json = board.to_json()
            algo.on_game_over(board_json['boardWidth'],
                              board_json['boardHeight'],
                              board_json['hives'],
                              board_json['flowers'],
                              board_json['inflight'],
                              {classification: {crashed_id: crashed_item.to_json()
                                                for crashed_id, crashed_item in crashes.items()}
                               for classification, crashes in crashed[i].items()},
                              {volant_id: volant.to_json() for volant_id, volant in lost_volants[i].items()},
                              {volant_id: volant.to_json() for volant_id, volant in received_volants[i].items()},
                              {bee_id: bee.to_json() for bee_id, bee in landed_bees[i].items()},
                              scores,
                              i,
                              game_state.game_id,
                              game_state.turn_num)
    list(pool.map(farewell_algo, range(len(algos)), algos, game_state.boards))
    return game_state
def print_game_result(game_state, algo_names):
    """Announce the winner(s) and dump the score tables."""
    winners = calculate_winner(game_state)
    if len(winners) == 1:
        print_single_winner(game_state, winners[0], algo_names)
    else:
        print_multiple_winners(game_state, winners, algo_names)
def calculate_winner(game_state):
    """Return the indices of the board(s) holding the highest score."""
    scores = [board.calculate_score() for board in game_state.boards]
    max_score = max(scores)
    return [i for i, score in enumerate(scores) if score == max_score]
def print_single_winner(game_state, winner, algo_names):
    """Report an outright winner, then the score breakdown."""
    winner_name = algo_names[winner]
    print('Congratulations to {}! You have won!'.format(winner_name))
    print_scores(game_state, algo_names)
    print_stats_table(game_state, algo_names)
def print_multiple_winners(game_state, winners, algo_names):
    """Report a tie between the given boards, then the score breakdown."""
    print('We have a tie between {}!'.format(' and '.join([algo_names[i] for i in winners])))
    print_scores(game_state, algo_names)
    print_stats_table(game_state, algo_names)
def print_scores(game_state, algo_names):
    """Print one padded 'name: score' line per player."""
    print('Final scores were:')
    for i, player in enumerate(algo_names):
        print('\t{}:\t{}'.format(player.ljust(20, ' '), game_state.boards[i].calculate_score()))
def print_stats_table(gamestate_json, algo_names):
    """Print a per-algo breakdown table of board summary statistics.

    NOTE(review): despite its name, *gamestate_json* is used as a GameState
    object (``.boards[i].summary()``), not a JSON dict — confirm with callers.
    (Repaired: the original final line had dataset-dump residue fused onto it.)
    """
    print('Breakdown of scores as follows:\n')
    summary_tables = {algo_name: gamestate_json.boards[i].summary()
                      for i, algo_name in enumerate(algo_names)}
    # All boards share the same summary keys; take them from the first table.
    keys = list(summary_tables.values())[0].keys()
    max_key_length = max(len(k) for k in keys)
    col_width = 15
    print('Algo Name'.ljust(max_key_length + 5, ' ') + '| ' +
          ' |'.join([algo_name.rjust(col_width, ' ')
                     for algo_name in summary_tables.keys()]))
    print('-' * (max_key_length + 7 + (col_width + 2) * len(algo_names)))
    for key in keys:
        print(key.ljust(max_key_length + 5) + '| ' +
              ' |'.join([str(table[key]).rjust(col_width, ' ')
                         for table in summary_tables.values()]))
from typing import Any
import pathlib
import json
import pytest
from _pytest.monkeypatch import MonkeyPatch
from django.utils import timezone
from django.conf import settings
from weather.libs.api.open_weather_map import OpenWeatherMap
from weather.libs.api.request_flow_controller import RequestFlowController
@pytest.fixture
def country_fake_data() -> dict[str, Any]:
    """Canned country payload used by the weather tests."""
    return dict(name='France', country_code='FR')
@pytest.fixture
def city_fake_data() -> dict[str, Any]:
    """Canned city payload used by the weather tests."""
    return dict(city_id=123456, name='Rennes')
@pytest.fixture
def ocean_fake_data() -> dict[str, Any]:
    """Canned ocean payload used by the weather tests."""
    return dict(name='Atlantic Ocean')
@pytest.fixture
def location_fake_data() -> dict[str, Any]:
    """Canned lat/lon payload used by the weather tests."""
    return dict(lat=48.10618240499252, lon=-1.6479917725717026)
@pytest.fixture
def weather_fake_data() -> dict[str, Any]:
    """Canned weather-state payload used by the weather tests."""
    return dict(
        weather_id=123456,
        state='Cloudy',
        description='It\'s means that\'s cloudy bro.',
    )
@pytest.fixture
def measure_fake_data() -> dict[str, Any]:
    """One fully populated measure record; timestamps are taken at call time."""
    return dict(
        measure_num=1,
        created_at=timezone.now(),
        measured_at=timezone.now(),
        tz_timestamp=-43000,
        wind_speed=42.8,
        wind_deg=45,
        wind_gust=45.2,
        visibility=1000,
        temp=14.5,
        feels_like=12.14,
        temp_min=8.6,
        temp_max=14.5,
        pressure=10,
        humidity=5,
        sea_level=10,
        ground_level=12,
    )
@pytest.fixture
def current_weather_fake_data() -> list[dict[str, Any]]:
    """Sample 'current weather' payloads loaded from the JSON fixture file."""
    sample_file = (
        settings.BASE_DIR
        / 'weather' / 'tests' / '__samples__' / 'weather_fake_data.json'
    )
    data: list[dict[str, Any]] = json.loads(sample_file.read_text())
    return data
@pytest.fixture
def fake_token() -> str:
    # Placeholder API token; never a real OpenWeatherMap key.
    return '<KEY>'
@pytest.fixture
def fake_owm(
    fake_token: str, tmp_path: pathlib.Path, monkeypatch: MonkeyPatch
) -> OpenWeatherMap:
    """Client under test, with BASE_DIR redirected to a throwaway tmp dir.

    (Repaired: the original final line had dataset-dump residue fused onto it.)
    """
    # Anything the client writes under BASE_DIR lands in pytest's tmp_path.
    monkeypatch.setattr(settings, 'BASE_DIR', tmp_path)
    return OpenWeatherMap(token=fake_token, calls_per_min=4)

from typing import Any
import pathlib
import json
import pytest
from _pytest.monkeypatch import MonkeyPatch
from django.utils import timezone
from django.conf import settings
from weather.libs.api.open_weather_map import OpenWeatherMap
from weather.libs.api.request_flow_controller import RequestFlowController
@pytest.fixture
def country_fake_data() -> dict[str, Any]:
    """Canned country payload used by the weather tests."""
    return {
        'name': 'France',
        'country_code': 'FR',
    }
@pytest.fixture
def city_fake_data() -> dict[str, Any]:
    """Canned city payload used by the weather tests."""
    return {
        'city_id': 123456,
        'name': 'Rennes',
    }
@pytest.fixture
def ocean_fake_data() -> dict[str, Any]:
    """Canned ocean payload used by the weather tests."""
    return {
        'name': 'Atlantic Ocean',
    }
@pytest.fixture
def location_fake_data() -> dict[str, Any]:
    """Canned lat/lon payload used by the weather tests."""
    return {
        'lat': 48.10618240499252,
        'lon': -1.6479917725717026,
    }
@pytest.fixture
def weather_fake_data() -> dict[str, Any]:
    """Canned weather-state payload used by the weather tests."""
    return {
        'weather_id': 123456,
        'state': 'Cloudy',
        'description': 'It\'s means that\'s cloudy bro.',
    }
@pytest.fixture
def measure_fake_data() -> dict[str, Any]:
    """One fully populated measure record; timestamps are taken at call time."""
    return {
        'measure_num': 1,
        'created_at': timezone.now(),
        'measured_at': timezone.now(),
        'tz_timestamp': -43000,
        'wind_speed': 42.8,
        'wind_deg': 45,
        'wind_gust': 45.2,
        'visibility': 1000,
        'temp': 14.5,
        'feels_like': 12.14,
        'temp_min': 8.6,
        'temp_max': 14.5,
        'pressure': 10,
        'humidity': 5,
        'sea_level': 10,
        'ground_level': 12,
    }
@pytest.fixture
def current_weather_fake_data() -> list[dict[str, Any]]:
    """Sample 'current weather' payloads loaded from the JSON fixture file."""
    data: list[dict[str, Any]] = json.loads(
        (
            settings.BASE_DIR
            / 'weather'
            / 'tests'
            / '__samples__'
            / 'weather_fake_data.json'
        ).read_text()
    )
    return data
@pytest.fixture
def fake_token() -> str:
    # Placeholder API token; never a real OpenWeatherMap key.
    return '<KEY>'
@pytest.fixture
def fake_owm(
    fake_token: str, tmp_path: pathlib.Path, monkeypatch: MonkeyPatch
) -> OpenWeatherMap:
    """Client under test, with BASE_DIR redirected to a throwaway tmp dir.

    (Repaired: the original final line had dataset-dump residue fused onto it.)
    """
    # Anything the client writes under BASE_DIR lands in pytest's tmp_path.
    monkeypatch.setattr(settings, 'BASE_DIR', tmp_path)
    return OpenWeatherMap(token=fake_token, calls_per_min=4)
import tkinter as tk
from Data import Data
from EditClient import EditClient
class Client(tk.Frame):
    """Read-only detail view of one client and their contracts."""

    def __init__(self, master=None, client_id=None):
        super().__init__(master)
        self.id = client_id  # primary key of the client to display
        self.create_table()

    def create_table(self):
        """Build all widgets from the client and contract records."""
        # Kept global: edit_client reads it after this frame is built.
        global data
        data = Data()
        data.clientById(self.id)
        data.contractByClient(self.id)
        client = data.client[0]
        tk.Label(self.master, text=client["name"]).grid(row=0, column=0)
        tk.Label(self.master, text="rg: " + client["rg"]).grid(row=1, column=0)
        tk.Label(self.master, text="gênero: " + client["gender"]).grid(row=2, column=0)
        tk.Label(self.master, text="nascimento: " + client["born"]).grid(row=3, column=0)
        labelframe1 = tk.LabelFrame(self.master, text="Informações adicionais")
        labelframe1.grid(row=0, column=1)
        tk.Label(labelframe1, text=client["info"]).grid()
        labelframe2 = tk.LabelFrame(self.master, text="Contratos")
        labelframe2.grid(row=4, column=0)
        # BUG FIX: the original loop always rendered data.contract[0] and
        # reused the same grid rows, so only the first contract was visible.
        for offset, contract in enumerate(data.contract):
            base = offset * 3
            tk.Label(labelframe2, text=contract["name"]).grid(row=base, column=0)
            # The string "None" is how the query reports a missing fixed price;
            # fall back to the monthly price in that case.
            if contract["price"] == "None":
                price_text = contract["monthly_price"]
            else:
                price_text = contract["price"]
            tk.Label(labelframe2, text=price_text).grid(row=base + 1, column=0)
            tk.Label(labelframe2, text=contract["start"] +
                     " até " + contract["end"]).grid(row=base + 2, column=0)
        tk.Button(self.master, text="Editar", command=self.edit_client).grid()

    def edit_client(self):
        """Open the edit window for the currently displayed client."""
        root = tk.Tk()
        edit = EditClient(master=root, id_client=data.client[0]["id"])
        edit.addButtons()
#root = tk.Tk()
#app = Client(root, 1)
#app.mainloop()
#data = Data()
#data.db.close()
import tkinter as tk
from Data import Data
from EditClient import EditClient
class Client(tk.Frame):
    """Read-only detail view of one client and their contracts."""

    def __init__(self, master=None, client_id=None):
        super().__init__(master)
        self.id = client_id  # primary key of the client to display
        self.create_table()

    def create_table(self):
        """Build all widgets from the client and contract records."""
        # NOTE(review): module-level global shared with edit_client below.
        global data
        data = Data()
        data.clientById(self.id)
        data.contractByClient(self.id)
        label1 = tk.Label(self.master,
                          text=data.client[0]["name"])
        label1.grid(row=0, column=0)
        label2 = tk.Label(self.master,
                          text="rg: "+data.client[0]["rg"])
        label2.grid(row=1, column=0)
        label3 = tk.Label(self.master,
                          text="gênero: "+data.client[0]["gender"])
        label3.grid(row=2, column=0)
        label4 = tk.Label(self.master,
                          text="nascimento: "+data.client[0]["born"])
        label4.grid(row=3, column=0)
        labelframe1 = tk.LabelFrame(self.master,
                                    text="Informações adicionais")
        labelframe1.grid(row=0, column=1)
        label5 = tk.Label(labelframe1, text=data.client[0]["info"])
        label5.grid()
        labelframe2 = tk.LabelFrame(self.master, text="Contratos")
        labelframe2.grid(row=4, column=0)
        # NOTE(review): this loop ignores ``line`` and always renders
        # data.contract[0], and reuses the same grid rows each pass — every
        # contract after the first is never shown. Looks like a bug.
        for line in data.contract:
            label6 = tk.Label(labelframe2, text=data.contract[0]["name"])
            label6.grid()
            # The string "None" is how a missing fixed price is reported.
            if(data.contract[0]["price"]=="None"):
                label7 = tk.Label(labelframe2, text=data.contract[0]["monthly_price"])
                label7.grid(row=1, column=0)
            else:
                label7 = tk.Label(labelframe2, text=data.contract[0]["price"])
                label7.grid(row=1, column=0)
            label8 = tk.Label(labelframe2, text=data.contract[0]["start"] +\
                              " até " + data.contract[0]["end"])
            label8.grid(row=2, column=0)
        button1 = tk.Button(self.master, text="Editar", command=self.edit_client)
        button1.grid()

    def edit_client(self):
        # Opens the edit window for the currently displayed client.
        root = tk.Tk()
        edit = EditClient(master=root, id_client=data.client[0]["id"])
        edit.addButtons()
#root = tk.Tk()
#app = Client(root, 1)
#app.mainloop()
#data = Data()
#data.db.close() | 0.169303 | 0.137301 |
import numpy as np
import scipy as sp
import scipy.spatial
from ..point_set import PointSet
from ..misc.interior_points import find_interior_points
class Boundary(PointSet):
"""
Parent class for all "Boundaries"
This class should never be used directly
Always instantiate Boundaries through the child classes
Methods:
__init__:
pre-initialization routine
must be provided by child class
__init2__:
post-initialization routine to be called at end of child init
compute_quadrature:
the workhorse for this class - computes quadrature nodes and weights
must be provided by child class
find_interior_points:
computes which points of target are interior to target or not
"""
def __init__(self, x=None, y=None, c=None):
super(Boundary, self).__init__(x, y, c)
# end __init__ function definition
def find_interior_points(self, target):
"""
Computes interior/exterior points via cauchy sum +
brute force search near to boundary, using matplotlib.path
and treating the boundary as a discrete polygon
for the brute force search
if the boundary type has a simple way to deal with this,
this method should be overwritten by the child class
"""
return find_interior_points(self, target)
def decorate(self, decoration_name, *args, **kwargs):
"""
Function for calling boundary decoraters
(Not sure this is necessary?)
"""
getattr(self, decoration_name)(args, kwargs)
# Decorations shared across boundary classes
def stack_normal(self):
if not hasattr(self, 'normal_stacked'):
self.stacked_normal = np.column_stack([self.normal_x, self.normal_y])
self.stacked_normal_T = self.stacked_normal.T
self.normal_stacked = True
def get_stacked_normal(self, T=True):
self.stack_normal()
return self.stacked_normal_T if T else self.stacked_normal
def FMM_preparations(self):
if not hasattr(self, 'prepared_for_FMM'):
self.stack_boundary()
self.stack_normal()
self.prepared_for_FMM = True | pybie2d/boundaries/boundary.py | import numpy as np
import scipy as sp
import scipy.spatial
from ..point_set import PointSet
from ..misc.interior_points import find_interior_points
class Boundary(PointSet):
    """
    Parent class for all "Boundaries".

    This class should never be used directly.
    Always instantiate Boundaries through the child classes.

    Methods:
        __init__:
            pre-initialization routine; must be provided by child class
        __init2__:
            post-initialization routine to be called at end of child init
        compute_quadrature:
            the workhorse for this class - computes quadrature nodes and
            weights; must be provided by child class
        find_interior_points:
            computes which points of target are interior to target or not
    """

    def __init__(self, x=None, y=None, c=None):
        # Defer all real setup to the PointSet base; child classes call
        # their own post-init routine afterwards.
        super(Boundary, self).__init__(x, y, c)
    # end __init__ function definition

    def find_interior_points(self, target):
        """
        Computes interior/exterior points via cauchy sum +
        brute force search near to boundary, using matplotlib.path
        and treating the boundary as a discrete polygon
        for the brute force search.

        If the boundary type has a simple way to deal with this,
        this method should be overwritten by the child class.
        """
        return find_interior_points(self, target)

    def decorate(self, decoration_name, *args, **kwargs):
        """
        Function for calling boundary decorators by name.

        BUGFIX: previously this forwarded ``(args, kwargs)`` as two
        positional arguments (a tuple and a dict) instead of unpacking
        them, so every decorator received the wrong arguments.
        """
        getattr(self, decoration_name)(*args, **kwargs)

    # Decorations shared across boundary classes
    def stack_normal(self):
        # Lazily cache an (N, 2) array of unit normals and its transpose;
        # the ``normal_stacked`` attribute marks the cache as built.
        if not hasattr(self, 'normal_stacked'):
            self.stacked_normal = np.column_stack([self.normal_x, self.normal_y])
            self.stacked_normal_T = self.stacked_normal.T
            self.normal_stacked = True

    def get_stacked_normal(self, T=True):
        """Return the cached stacked normals; transposed when ``T`` is True."""
        self.stack_normal()
        return self.stacked_normal_T if T else self.stacked_normal

    def FMM_preparations(self):
        # One-time staging of stacked coordinates/normals before FMM calls.
        # NOTE(review): ``stack_boundary`` is presumably provided by
        # PointSet — confirm upstream.
        if not hasattr(self, 'prepared_for_FMM'):
            self.stack_boundary()
            self.stack_normal()
            self.prepared_for_FMM = True
from __future__ import annotations
from typing import Dict
class BaseHoradricError(Exception):
"""
Base error for Horadric projects
"""
text_code: str = None
error_template: str = None
def __init_subclass__(cls, **kwargs):
if cls.text_code is None:
raise ValueError("`text_code` must be set")
if cls.error_template is None:
raise ValueError("`error_template must be set`")
def __init__(self, **kwargs):
"""
Only kwargs because templating
"""
self.kwargs: dict = kwargs
def __str__(self):
return self.error_template.format(**self.kwargs)
class BaseLogicError(BaseHoradricError):
# TODO: remove checks for base errors
text_code: str = "logic_error"
error_template: str = "Some logic error"
class BaseHttpError(BaseHoradricError):
text_code: str = "http_error"
error_template: str = "Some http error"
code: int = 500
class NotModified(BaseHttpError):
code = 304
text_code = "not_modified"
error_template = "Not Modified"
class BadRequestFormat(BaseHttpError):
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/415
code = 415
text_code = "bad_request_format"
error_template = "Bad request mimetype: {format_}"
def __init__(self, format_):
# type: (str) -> None
super(BadRequestFormat, self).__init__(format_=format_)
class RequestParsingError(BaseHttpError):
code = 400
text_code = "request_parsing_error"
error_template = "Request parsing error for format: {format_}"
def __init__(self, format_):
# type: (str) -> None
super(RequestParsingError, self).__init__(format_=format_)
class BadResponseFormat(BaseHttpError):
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/406
code = 406
text_code = "bad_response_format"
error_template = "Bad response format: {format_}"
def __init__(self, format_):
# type: (str) -> None
super(BadResponseFormat, self).__init__(format_=format_)
class RequestValidationError(BaseHttpError):
code = 400
text_code = "request_validation_error"
error_template = "Request validation errors: {errors}"
def __init__(self, errors: Dict[str, str]) -> None:
super(RequestValidationError, self).__init__(errors=errors) | open_horadric_lib/base/exception.py | from __future__ import annotations
from typing import Dict
class BaseHoradricError(Exception):
    """
    Base error for Horadric projects.

    Subclasses must define both ``text_code`` (a machine-readable error
    identifier) and ``error_template`` (a ``str.format`` template that is
    rendered from the keyword arguments passed to the constructor).
    """

    text_code: str = None
    error_template: str = None

    def __init_subclass__(cls, **kwargs):
        # BUGFIX: forward to super() so cooperative subclassing (mixins,
        # other __init_subclass__ hooks) keeps working; kwargs were
        # previously swallowed silently.
        super().__init_subclass__(**kwargs)
        if cls.text_code is None:
            raise ValueError("`text_code` must be set")
        if cls.error_template is None:
            raise ValueError("`error_template` must be set")

    def __init__(self, **kwargs):
        """
        Only kwargs because templating.
        """
        # Kept verbatim so structured handlers can inspect the raw values.
        self.kwargs: dict = kwargs

    def __str__(self):
        return self.error_template.format(**self.kwargs)


class BaseLogicError(BaseHoradricError):
    """Base class for domain/logic-level failures."""
    # TODO: remove checks for base errors
    text_code: str = "logic_error"
    error_template: str = "Some logic error"


class BaseHttpError(BaseHoradricError):
    """Base class for errors carrying an HTTP status ``code``."""
    text_code: str = "http_error"
    error_template: str = "Some http error"
    code: int = 500


class NotModified(BaseHttpError):
    code = 304
    text_code = "not_modified"
    error_template = "Not Modified"


class BadRequestFormat(BaseHttpError):
    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/415
    code = 415
    text_code = "bad_request_format"
    error_template = "Bad request mimetype: {format_}"

    def __init__(self, format_: str) -> None:
        super().__init__(format_=format_)


class RequestParsingError(BaseHttpError):
    code = 400
    text_code = "request_parsing_error"
    error_template = "Request parsing error for format: {format_}"

    def __init__(self, format_: str) -> None:
        super().__init__(format_=format_)


class BadResponseFormat(BaseHttpError):
    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/406
    code = 406
    text_code = "bad_response_format"
    error_template = "Bad response format: {format_}"

    def __init__(self, format_: str) -> None:
        super().__init__(format_=format_)


class RequestValidationError(BaseHttpError):
    code = 400
    text_code = "request_validation_error"
    error_template = "Request validation errors: {errors}"

    def __init__(self, errors: Dict[str, str]) -> None:
        super().__init__(errors=errors)
import telebot
import socks, socket
import config
import time
import stats_getter
import image_maker
import db
import requests.exceptions as rqst_expts
print('modules imported')
bot = telebot.TeleBot(config.tg_token)
# socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, '192.168.127.12', 1080)
telebot.apihelper.proxy = {'https': 'socks5h://172.16.17.32:32125'}
if bot:
print('bot started')
else:
print('bot start failed')
user_list = []
lfm_username = ''
period_table = {'period_week': '7day',
'period_month': '1month',
'period_3month': '3month',
'period_6month': '6month',
'period_year': '12month',
'period_overall': 'overall'}
def generate_pic(username, period):
top_artists = stats_getter.get_top_artist(username, period)
image_report = image_maker.make_report_image_bytes(top_artists, with_frame=True)
# stats_getter.save_image(image_report, 'image_report')
return image_report
@bot.message_handler(content_types='text')
def answer(message):
global user_list
if not any(user['chat_id'] == message.chat.id for user in user_list):
print('New chat, id: {}'.format(message.chat.id))
user_list.append({'chat_id': message.chat.id})
bot.send_message(message.chat.id, 'Привет! Введите имя пользователя с помощью команды /username')
else:
if '/username' in message.text and message.text.find(' ') > 0:
username = message.text[message.text.find(' ')+1:len(message.text)]
for user in user_list:
if user.get('chat_id') == message.chat.id:
print('We got user with id ({})'.format(message.chat.id))
user.update({'lastfm_username': username})
print(user)
keyboard = telebot.types.InlineKeyboardMarkup()
cb_period_week = telebot.types.InlineKeyboardButton(text="Неделя", callback_data="period_week")
cb_period_month = telebot.types.InlineKeyboardButton(text="Месяц", callback_data="period_month")
cb_period_month_three = telebot.types.InlineKeyboardButton(text="3 месяца", callback_data="period_3month")
cb_period_month_six = telebot.types.InlineKeyboardButton(text="Полгода", callback_data="period_6month")
cb_period_year = telebot.types.InlineKeyboardButton(text="Год", callback_data="period_year")
cb_period_overall = telebot.types.InlineKeyboardButton(text="За все время", callback_data="period_overall")
keyboard.add(cb_period_week, cb_period_month, cb_period_month_three, cb_period_month_six, cb_period_year,
cb_period_overall)
answer_msg = "Выберите период статистики:"
bot.send_message(message.chat.id, answer_msg, reply_markup=keyboard)
else:
current_user = next(user for user in user_list if user["chat_id"] == message.chat.id)
if 'lastfm_username' in current_user:
answer_msg = "Вы случайно не {}? Попробуйте еще раз. Например: \n <a href='https://example.com'>This is an example</a>"\
.format(current_user['lastfm_username'], current_user['lastfm_username'],
current_user['lastfm_username'], )
else:
answer_msg = "Что-то не так с именем пользователя. Попробуйте еще раз. Например: \n " \
"[/username ph1l74](/username ph1l74)"
bot.send_message(message.chat.id, answer_msg)
@bot.callback_query_handler(func=lambda call: True)
def callback_inline(call):
global period_table, user_list
if call.message:
username = ''
for user in user_list:
if user.get('chat_id') == call.message.chat.id:
username = user.get('lastfm_username')
break
period = period_table[call.data]
if len(username) > 0 and period:
image = generate_pic(username=username, period=period)
bot.send_photo(call.message.chat.id, image)
# bot.send_message(call.message.chat.id, answer_msg)
else:
bot.send_message(call.message.chat.id, 'Простите, но вы не ввели имя пользователя Last FM. \
Наберите команду: /username и введите имя пользователя, например: \n /username filatique')
def test_db(chat_id):
db.get_user_by_chat_id(db_config=config.db, tg_chat_id=chat_id)
while True:
try:
bot.polling(none_stop="True", timeout=10)
except AttributeError:
print('Attribute Error')
bot.polling(none_stop="True", timeout=10)
except ConnectionResetError:
print('Connection Reset Error. Retry in 5 secs...')
time.sleep(5)
bot.polling(none_stop="True", timeout=10)
except ConnectionError:
print('Connection Error. Retry in 5 secs...')
time.sleep(5)
bot.polling(none_stop="True", timeout=10)
except rqst_expts.ConnectTimeout:
print('Connection Timeout Error. Retry in 5 secs...')
time.sleep(5)
bot.polling(none_stop="True", timeout=10) | main.py | import telebot
import socks, socket
import config
import time
import stats_getter
import image_maker
import db
import requests.exceptions as rqst_expts
# --- module-level bootstrap: create the bot and its lookup tables ---
print('modules imported')
bot = telebot.TeleBot(config.tg_token)
# socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, '192.168.127.12', 1080)
# Route Telegram API traffic through a hardcoded SOCKS5 proxy.
telebot.apihelper.proxy = {'https': 'socks5h://172.16.17.32:32125'}
# NOTE(review): a TeleBot instance is always truthy, so the else branch is
# unreachable — TeleBot() raises on failure instead of returning falsy.
if bot:
print('bot started')
else:
print('bot start failed')
# In-memory session store: one dict per chat ({'chat_id', 'lastfm_username'}).
user_list = []
lfm_username = ''
# Maps inline-button callback_data to the Last.fm API period string.
period_table = {'period_week': '7day',
'period_month': '1month',
'period_3month': '3month',
'period_6month': '6month',
'period_year': '12month',
'period_overall': 'overall'}
# Build a top-artists report image for the given Last.fm user and period.
# Returns whatever image_maker.make_report_image_bytes produces — presumably
# an in-memory image (bytes/BytesIO) suitable for bot.send_photo; confirm.
def generate_pic(username, period):
top_artists = stats_getter.get_top_artist(username, period)
image_report = image_maker.make_report_image_bytes(top_artists, with_frame=True)
# stats_getter.save_image(image_report, 'image_report')
return image_report
# Text-message handler: greets new chats, stores the Last.fm username sent
# via "/username <name>", and offers the stats-period keyboard.
@bot.message_handler(content_types='text')
def answer(message):
global user_list
# First message from this chat: register it and ask for a username.
if not any(user['chat_id'] == message.chat.id for user in user_list):
print('New chat, id: {}'.format(message.chat.id))
user_list.append({'chat_id': message.chat.id})
bot.send_message(message.chat.id, 'Привет! Введите имя пользователя с помощью команды /username')
else:
# "/username <name>" — store the name and show period buttons.
if '/username' in message.text and message.text.find(' ') > 0:
# Everything after the first space is taken as the username.
username = message.text[message.text.find(' ')+1:len(message.text)]
for user in user_list:
if user.get('chat_id') == message.chat.id:
print('We got user with id ({})'.format(message.chat.id))
user.update({'lastfm_username': username})
print(user)
# Inline keyboard whose callback_data keys match period_table.
keyboard = telebot.types.InlineKeyboardMarkup()
cb_period_week = telebot.types.InlineKeyboardButton(text="Неделя", callback_data="period_week")
cb_period_month = telebot.types.InlineKeyboardButton(text="Месяц", callback_data="period_month")
cb_period_month_three = telebot.types.InlineKeyboardButton(text="3 месяца", callback_data="period_3month")
cb_period_month_six = telebot.types.InlineKeyboardButton(text="Полгода", callback_data="period_6month")
cb_period_year = telebot.types.InlineKeyboardButton(text="Год", callback_data="period_year")
cb_period_overall = telebot.types.InlineKeyboardButton(text="За все время", callback_data="period_overall")
keyboard.add(cb_period_week, cb_period_month, cb_period_month_three, cb_period_month_six, cb_period_year,
cb_period_overall)
answer_msg = "Выберите период статистики:"
bot.send_message(message.chat.id, answer_msg, reply_markup=keyboard)
else:
# Any other text: remind the user of the expected command syntax.
current_user = next(user for user in user_list if user["chat_id"] == message.chat.id)
if 'lastfm_username' in current_user:
# NOTE(review): the template has one {} placeholder but .format()
# receives three copies of the username — extras are ignored.
answer_msg = "Вы случайно не {}? Попробуйте еще раз. Например: \n <a href='https://example.com'>This is an example</a>"\
.format(current_user['lastfm_username'], current_user['lastfm_username'],
current_user['lastfm_username'], )
else:
answer_msg = "Что-то не так с именем пользователя. Попробуйте еще раз. Например: \n " \
"[/username ph1l74](/username ph1l74)"
bot.send_message(message.chat.id, answer_msg)
@bot.callback_query_handler(func=lambda call: True)
def callback_inline(call):
    """Handle a period-selection button press and reply with a stats image.

    BUGFIX: ``user.get('lastfm_username')`` returns None for a chat that is
    registered but has not stored a username yet, which crashed the
    ``len(username)`` check below; and an unknown ``call.data`` payload
    raised KeyError on ``period_table``. Both now fall through to the
    "no username" error reply.
    """
    global period_table, user_list
    if call.message:
        username = ''
        for user in user_list:
            if user.get('chat_id') == call.message.chat.id:
                # Coerce a missing/None username to '' so the length
                # check cannot raise TypeError.
                username = user.get('lastfm_username') or ''
                break
        # .get(): unknown callback_data yields None instead of KeyError.
        period = period_table.get(call.data)
        if len(username) > 0 and period:
            image = generate_pic(username=username, period=period)
            bot.send_photo(call.message.chat.id, image)
            # bot.send_message(call.message.chat.id, answer_msg)
        else:
            bot.send_message(call.message.chat.id, 'Простите, но вы не ввели имя пользователя Last FM. \
Наберите команду: /username и введите имя пользователя, например: \n /username filatique')
# Manual smoke-test helper: look up a user row by Telegram chat id.
# Not called from the main flow — presumably kept for debugging; confirm.
def test_db(chat_id):
db.get_user_by_chat_id(db_config=config.db, tg_chat_id=chat_id)
# Main loop: keep long-polling alive, backing off 5 s on network errors.
# BUGFIX: the old handlers re-invoked bot.polling() *inside* each except
# block; any error raised by that nested call escaped the handler and
# killed the process. The surrounding while-loop is the retry mechanism,
# so handlers now only log (and sleep) before the next iteration retries.
while True:
    try:
        bot.polling(none_stop="True", timeout=10)
    except AttributeError:
        print('Attribute Error')
    except ConnectionResetError:
        # ConnectionResetError is a ConnectionError subclass, so it must
        # be listed before the broader handler below.
        print('Connection Reset Error. Retry in 5 secs...')
        time.sleep(5)
    except ConnectionError:
        print('Connection Error. Retry in 5 secs...')
        time.sleep(5)
    except rqst_expts.ConnectTimeout:
        print('Connection Timeout Error. Retry in 5 secs...')
        time.sleep(5)
import re
class ModelInflector(object):
def __new__(cls, model):
try:
return _inflectors[cls]
except KeyError:
inflector = super(ModelInflector, cls).__new__(cls, model)
_inflectors[model] = inflector
return inflector
def __init__(self, model):
self.model = model
register_inflections(self)
def all_inflections(self):
return {
'model_singular': self.model_singular,
'model_plural': self.model_plural,
'table_singular': self.table_singular,
'table_plural': self.table_plural,
'human_singular': self.human_singular,
'human_plural': self.human_plural,
'title_singular': self.title_singular,
'title_plural': self.title_plural,
}
@property
def model_singular(self):
return self.model.__name__
@property
def model_plural(self):
return self.title_plural.replace(' ', '')
@property
def table_singular(self):
return self.underscore_from_camelcase(self.model_singular)
@property
def table_plural(self):
return self.model.__tablename__
@property
def human_singular(self):
return self.title_singular.lower()
@property
def human_plural(self):
return self.title_plural.lower()
@property
def title_singular(self):
return self.titleize_from_camelcase(self.model.__name__)
@property
def title_plural(self):
return self.model.__tablename__.replace('_', ' ').title()
# Helpers
@classmethod
def underscore_from_camelcase(cls, s):
s1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', s)
return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
@classmethod
def titleize_from_camelcase(cls, s):
s1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1 \2', s)
return re.sub(r'([a-z0-9])([A-Z])', r'\1 \2', s1)
def __repr__(self):
return (
'ModelInflector({model}):\n'
' model: {model_singular} {model_plural}\n'
' table: {table_singular} {table_plural}\n'
' human: {human_singular} {human_plural}\n'
' title: {title_singular} {title_plural}\n')\
.format(model=self.model, **self.all_inflections())
class ModelInflectorDescriptor(object):
cache_attribute = '_cached_inflector'
def __get__(self, obj, cls):
model_inflector = getattr(cls, self.cache_attribute, None)
if model_inflector is None:
model_inflector = ModelInflector(cls)
setattr(cls, self.cache_attribute, model_inflector)
return model_inflector
# { <model class>: <ModelInflector()> }
_inflectors = {}
# { <inflection>: <model class> }
_inflection_to_model = {}
def register_inflections(inflector):
for mode, value in inflector.all_inflections().items():
_inflection_to_model[value] = inflector.model
def get_model(s):
return _inflection_to_model.get(s, None) | src/ggrc/models/inflector.py | import re
# { <model class>: <ModelInflector()> } — declared before the class so the
# caches exist by the time the first inflector is constructed.
_inflectors = {}
# { <inflection string>: <model class> }
_inflection_to_model = {}


class ModelInflector(object):
    """Computes naming inflections (singular/plural, snake/title case,
    human-readable) for a SQLAlchemy-style model class.

    Instances are cached per model class: constructing ``ModelInflector(M)``
    twice returns the same object.
    """

    def __new__(cls, model):
        # BUGFIX: the cache was probed with ``cls`` but populated with
        # ``model``, so the lookup never hit; and ``object.__new__`` was
        # called with the extra ``model`` argument, which raises TypeError
        # on Python 3. Probe and populate with ``model``, and call
        # ``__new__`` with ``cls`` only.
        try:
            return _inflectors[model]
        except KeyError:
            inflector = super(ModelInflector, cls).__new__(cls)
            _inflectors[model] = inflector
            return inflector

    def __init__(self, model):
        # __init__ re-runs even on a cache hit; it is idempotent.
        self.model = model
        register_inflections(self)

    def all_inflections(self):
        """Return every inflection keyed by its mode name."""
        return {
            'model_singular': self.model_singular,
            'model_plural': self.model_plural,
            'table_singular': self.table_singular,
            'table_plural': self.table_plural,
            'human_singular': self.human_singular,
            'human_plural': self.human_plural,
            'title_singular': self.title_singular,
            'title_plural': self.title_plural,
        }

    @property
    def model_singular(self):
        # e.g. "SystemControl"
        return self.model.__name__

    @property
    def model_plural(self):
        # e.g. "SystemControls"
        return self.title_plural.replace(' ', '')

    @property
    def table_singular(self):
        # e.g. "system_control"
        return self.underscore_from_camelcase(self.model_singular)

    @property
    def table_plural(self):
        # e.g. "system_controls" (taken verbatim from the model)
        return self.model.__tablename__

    @property
    def human_singular(self):
        # e.g. "system control"
        return self.title_singular.lower()

    @property
    def human_plural(self):
        # e.g. "system controls"
        return self.title_plural.lower()

    @property
    def title_singular(self):
        # e.g. "System Control"
        return self.titleize_from_camelcase(self.model.__name__)

    @property
    def title_plural(self):
        # e.g. "System Controls"
        return self.model.__tablename__.replace('_', ' ').title()

    # Helpers
    @classmethod
    def underscore_from_camelcase(cls, s):
        """Convert CamelCase to snake_case."""
        s1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', s)
        return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    @classmethod
    def titleize_from_camelcase(cls, s):
        """Convert CamelCase to space-separated Title Case words."""
        s1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1 \2', s)
        return re.sub(r'([a-z0-9])([A-Z])', r'\1 \2', s1)

    def __repr__(self):
        return (
            'ModelInflector({model}):\n'
            '  model: {model_singular} {model_plural}\n'
            '  table: {table_singular} {table_plural}\n'
            '  human: {human_singular} {human_plural}\n'
            '  title: {title_singular} {title_plural}\n')\
            .format(model=self.model, **self.all_inflections())


class ModelInflectorDescriptor(object):
    """Class-level descriptor that lazily builds and caches the owner
    class's ModelInflector on first access."""

    cache_attribute = '_cached_inflector'

    def __get__(self, obj, cls):
        model_inflector = getattr(cls, self.cache_attribute, None)
        if model_inflector is None:
            model_inflector = ModelInflector(cls)
            setattr(cls, self.cache_attribute, model_inflector)
        return model_inflector


def register_inflections(inflector):
    """Index every inflection string back to the model that produced it."""
    for mode, value in inflector.all_inflections().items():
        _inflection_to_model[value] = inflector.model


def get_model(s):
    """Return the model class known under inflection ``s``, or None."""
    return _inflection_to_model.get(s, None)
import pandas as pd
import numpy as np
from os import path
from copy import deepcopy
import argparse
import os
parser = argparse.ArgumentParser(description="Argparser for Pytorch 3D CNN")
# Mandatory arguments
parser.add_argument("generalist_model_path", type=str,
help="Path to the model trained on the whole data.")
parser.add_argument("specialist_model_path", type=str,
help="Path to the model trained on the eldest subjects.")
parser.add_argument("fusion_path", type=str,
help="Path to the output directory containing the fused results.")
parser.add_argument("--split", type=int, default=None, nargs='+',
help="Will process the list of folds wanted. Default behaviour will process all available folds "
"in the generalist model directory.")
def mean_fuse_results(gen_df, spe_df, age_limit=40):
fusion_df = deepcopy(gen_df)
for participant in gen_df.index.values:
gen_age = gen_df.loc[participant, 'predicted_age']
if gen_age > age_limit:
spe_age = spe_df.loc[participant, 'predicted_age']
fusion_df.loc[participant, 'predicted_age'] = (spe_age + gen_age) / 2
return fusion_df
def evaluate_spearman(df):
from scipy.stats import spearmanr
simple_correlation, _ = spearmanr(df.true_age, df.predicted_age)
difference_correlation, _ = spearmanr(df.true_age, df.predicted_age.astype(float) - df.true_age.astype(float))
return simple_correlation, difference_correlation
def evaluate_age_limits(gen_df, spe_df):
age_limits = np.arange(30, 89)
results_df = pd.DataFrame(index=age_limits, columns=['MAE_mean', 'Spearman_mean'])
for age_limit in age_limits:
mean_df = mean_fuse_results(gen_df, spe_df, age_limit=age_limit)
MAE = np.mean(np.abs(mean_df.predicted_age - mean_df.true_age))
_, diff_Spearman = evaluate_spearman(mean_df)
results_df.loc[age_limit, 'MAE_mean'] = MAE
results_df.loc[age_limit, 'Spearman_mean'] = diff_Spearman
return results_df
def main(options):
if options.split is None:
split_dirs = [split_dir for split_dir in os.listdir(options.generalist_model_path)
if split_dir.split('_')[0] == "fold"]
options.split = sorted([int(split_dir.split('_')[1]) for split_dir in split_dirs])
for fold in options.split:
print(fold, type(fold))
gen_tv_df = pd.read_csv(path.join(options.generalist_model_path, 'fold_%i' % fold, 'performances_train',
'best_loss', 'valid_subject_level_result.tsv'), sep='\t')
spe_tv_df = pd.read_csv(path.join(options.specialist_model_path, 'fold_%i' % fold, 'performances_train',
'best_loss', 'valid_subject_level_result.tsv'), sep='\t')
gen_tv_df.set_index('participant_id', inplace=True)
spe_tv_df.set_index('participant_id', inplace=True)
results_df = evaluate_age_limits(gen_tv_df, spe_tv_df)
MAE_mean = results_df.MAE_mean.astype(float)
age_limit = MAE_mean.idxmin()
print("Min MAE %.2f for age %i" %(MAE_mean.min(), age_limit))
gen_v_df = pd.read_csv(path.join(options.generalist_model_path, 'fold_%i' % fold, 'performances_val',
'best_loss', 'valid_subject_level_result.tsv'), sep='\t')
spe_v_df = pd.read_csv(path.join(options.specialist_model_path, 'fold_%i' % fold, 'performances_val',
'best_loss', 'valid_subject_level_result.tsv'), sep='\t')
gen_v_df.set_index('participant_id', inplace=True)
spe_v_df.set_index('participant_id', inplace=True)
fusion_df = mean_fuse_results(gen_v_df, spe_v_df, age_limit=age_limit)
MAE = np.mean(np.abs(fusion_df.predicted_age - fusion_df.true_age))
_, r = evaluate_spearman(fusion_df)
print("Fusion, MAE: %.2f, r: %.2f" % (MAE, r))
MAE = np.mean(np.abs(gen_v_df.predicted_age - gen_v_df.true_age))
_, r = evaluate_spearman(gen_v_df)
print("Generalist, MAE: %.2f, r: %2f" % (MAE, r))
results_path = path.join(options.fusion_path, 'fold_%i' % fold, 'performances_val', 'best_loss')
if not path.exists(results_path):
os.makedirs(results_path)
fusion_df.to_csv(path.join(results_path, 'valid_subject_level_result.tsv'), sep='\t')
if __name__ == "__main__":
commandline = parser.parse_known_args()
options = commandline[0]
if commandline[1]:
print("unknown arguments: %s" % parser.parse_known_args()[1])
main(options) | src/deep/fuse_cnn6layer.py | import pandas as pd
import numpy as np
from os import path
from copy import deepcopy
import argparse
import os
# CLI: three positional model/output paths plus an optional list of folds.
parser = argparse.ArgumentParser(description="Argparser for Pytorch 3D CNN")
# Mandatory arguments
parser.add_argument("generalist_model_path", type=str,
help="Path to the model trained on the whole data.")
parser.add_argument("specialist_model_path", type=str,
help="Path to the model trained on the eldest subjects.")
parser.add_argument("fusion_path", type=str,
help="Path to the output directory containing the fused results.")
# When omitted, main() discovers the fold_* directories itself.
parser.add_argument("--split", type=int, default=None, nargs='+',
help="Will process the list of folds wanted. Default behaviour will process all available folds "
"in the generalist model directory.")
def mean_fuse_results(gen_df, spe_df, age_limit=40):
    """Blend generalist and specialist age predictions.

    For every participant whose generalist prediction exceeds ``age_limit``,
    the fused prediction is the mean of the generalist and specialist
    values; younger participants keep the generalist prediction unchanged.
    The input frames (indexed by participant) are not modified.
    """
    blended = deepcopy(gen_df)
    for pid in gen_df.index.values:
        general = gen_df.loc[pid, 'predicted_age']
        if general <= age_limit:
            continue
        special = spe_df.loc[pid, 'predicted_age']
        blended.loc[pid, 'predicted_age'] = (general + special) / 2
    return blended
def evaluate_spearman(df):
    """Return two Spearman correlations against the ground-truth ages.

    The first value correlates true_age with predicted_age directly; the
    second correlates true_age with the prediction error
    (predicted - true), i.e. the age bias of the model.
    """
    from scipy.stats import spearmanr
    errors = df.predicted_age.astype(float) - df.true_age.astype(float)
    plain_rho = spearmanr(df.true_age, df.predicted_age)[0]
    bias_rho = spearmanr(df.true_age, errors)[0]
    return plain_rho, bias_rho
def evaluate_age_limits(gen_df, spe_df):
    """Grid-search the fusion age threshold over ages 30..88.

    Returns a DataFrame indexed by the candidate age limit, with the MAE
    of the fused predictions and the Spearman correlation of true age
    with the prediction error for each candidate.
    """
    candidates = np.arange(30, 89)
    summary = pd.DataFrame(index=candidates, columns=['MAE_mean', 'Spearman_mean'])
    for limit in candidates:
        fused = mean_fuse_results(gen_df, spe_df, age_limit=limit)
        abs_err = np.abs(fused.predicted_age - fused.true_age)
        summary.loc[limit, 'MAE_mean'] = np.mean(abs_err)
        summary.loc[limit, 'Spearman_mean'] = evaluate_spearman(fused)[1]
    return summary
# Per-fold pipeline: tune the fusion threshold on the train/valid split,
# then apply it to the validation split and write the fused predictions.
def main(options):
# Discover fold_* directories when --split was not given explicitly.
if options.split is None:
split_dirs = [split_dir for split_dir in os.listdir(options.generalist_model_path)
if split_dir.split('_')[0] == "fold"]
options.split = sorted([int(split_dir.split('_')[1]) for split_dir in split_dirs])
for fold in options.split:
print(fold, type(fold))
# --- tuning phase: subject-level results from the training run ---
gen_tv_df = pd.read_csv(path.join(options.generalist_model_path, 'fold_%i' % fold, 'performances_train',
'best_loss', 'valid_subject_level_result.tsv'), sep='\t')
spe_tv_df = pd.read_csv(path.join(options.specialist_model_path, 'fold_%i' % fold, 'performances_train',
'best_loss', 'valid_subject_level_result.tsv'), sep='\t')
gen_tv_df.set_index('participant_id', inplace=True)
spe_tv_df.set_index('participant_id', inplace=True)
# Pick the age limit that minimises the fused MAE.
results_df = evaluate_age_limits(gen_tv_df, spe_tv_df)
MAE_mean = results_df.MAE_mean.astype(float)
age_limit = MAE_mean.idxmin()
print("Min MAE %.2f for age %i" %(MAE_mean.min(), age_limit))
# --- evaluation phase: apply the tuned limit to the validation split ---
gen_v_df = pd.read_csv(path.join(options.generalist_model_path, 'fold_%i' % fold, 'performances_val',
'best_loss', 'valid_subject_level_result.tsv'), sep='\t')
spe_v_df = pd.read_csv(path.join(options.specialist_model_path, 'fold_%i' % fold, 'performances_val',
'best_loss', 'valid_subject_level_result.tsv'), sep='\t')
gen_v_df.set_index('participant_id', inplace=True)
spe_v_df.set_index('participant_id', inplace=True)
fusion_df = mean_fuse_results(gen_v_df, spe_v_df, age_limit=age_limit)
MAE = np.mean(np.abs(fusion_df.predicted_age - fusion_df.true_age))
_, r = evaluate_spearman(fusion_df)
print("Fusion, MAE: %.2f, r: %.2f" % (MAE, r))
# Baseline numbers for the un-fused generalist, for comparison.
MAE = np.mean(np.abs(gen_v_df.predicted_age - gen_v_df.true_age))
_, r = evaluate_spearman(gen_v_df)
# NOTE(review): "%2f" is missing the dot (should be "%.2f") — the value
# prints un-rounded; cosmetic format defect.
print("Generalist, MAE: %.2f, r: %2f" % (MAE, r))
results_path = path.join(options.fusion_path, 'fold_%i' % fold, 'performances_val', 'best_loss')
if not path.exists(results_path):
os.makedirs(results_path)
fusion_df.to_csv(path.join(results_path, 'valid_subject_level_result.tsv'), sep='\t')
if __name__ == "__main__":
commandline = parser.parse_known_args()
options = commandline[0]
if commandline[1]:
print("unknown arguments: %s" % parser.parse_known_args()[1])
main(options) | 0.604049 | 0.196132 |
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import kornia.augmentation as K
import pytorch_lightning as pl
import torch
import torchvision.transforms as T
from einops import rearrange
from kornia.contrib import compute_padding, extract_tensor_patches
from torch.utils.data import DataLoader, Dataset
from torch.utils.data._utils.collate import default_collate
from ..datasets import InriaAerialImageLabeling
from ..samplers.utils import _to_tuple
from .utils import dataset_split
# Default training-time augmentation: random horizontal/vertical flips
# applied jointly to the image and its segmentation mask.
DEFAULT_AUGS = K.AugmentationSequential(
K.RandomHorizontalFlip(p=0.5),
K.RandomVerticalFlip(p=0.5),
data_keys=["input", "mask"],
)
def collate_wrapper(batch: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Collate patch samples and merge the tile and patch axes.

    Each sample carries a stack of patches, so after default collation the
    tensors are shaped (batch, patches, ...); this wrapper flattens the
    leading two dimensions to (batch * patches, ...) for "image" and,
    when present, for "mask".
    """
    collated: Dict[str, Any] = default_collate(batch)  # type: ignore[no-untyped-call]
    collated["image"] = torch.flatten(collated["image"], 0, 1)
    if "mask" in collated:
        collated["mask"] = torch.flatten(collated["mask"], 0, 1)
    return collated
class InriaAerialImageLabelingDataModule(pl.LightningDataModule):
"""LightningDataModule implementation for the InriaAerialImageLabeling dataset.
Uses the train/test splits from the dataset and further splits
the train split into train/val splits.
.. versionadded:: 0.3
"""
h, w = 5000, 5000
def __init__(
self,
root_dir: str,
batch_size: int = 32,
num_workers: int = 0,
val_split_pct: float = 0.1,
test_split_pct: float = 0.1,
patch_size: Union[int, Tuple[int, int]] = 512,
num_patches_per_tile: int = 32,
augmentations: K.AugmentationSequential = DEFAULT_AUGS,
predict_on: str = "test",
) -> None:
"""Initialize a LightningDataModule for InriaAerialImageLabeling based DataLoaders.
Args:
root_dir: The ``root`` arugment to pass to the InriaAerialImageLabeling
Dataset classes
batch_size: The batch size used in the train DataLoader
(val_batch_size == test_batch_size == 1)
num_workers: The number of workers to use in all created DataLoaders
val_split_pct: What percentage of the dataset to use as a validation set
test_split_pct: What percentage of the dataset to use as a test set
patch_size: Size of random patch from image and mask (height, width)
num_patches_per_tile: Number of random patches per sample
augmentations: Default augmentations applied
predict_on: Directory/Dataset of images to run inference on
"""
super().__init__() # type: ignore[no-untyped-call]
self.root_dir = root_dir
self.batch_size = batch_size
self.num_workers = num_workers
self.val_split_pct = val_split_pct
self.test_split_pct = test_split_pct
self.patch_size = cast(Tuple[int, int], _to_tuple(patch_size))
self.num_patches_per_tile = num_patches_per_tile
self.augmentations = augmentations
self.predict_on = predict_on
self.random_crop = K.AugmentationSequential(
K.RandomCrop(self.patch_size, p=1.0, keepdim=False),
data_keys=["input", "mask"],
)
def patch_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
"""Extract patches from single sample."""
assert sample["image"].ndim == 3
_, h, w = sample["image"].shape
padding = compute_padding((h, w), self.patch_size)
sample["original_shape"] = (h, w)
sample["patch_shape"] = self.patch_size
sample["padding"] = padding
sample["image"] = extract_tensor_patches(
sample["image"].unsqueeze(0),
self.patch_size,
self.patch_size,
padding=padding,
)
sample["image"] = rearrange(sample["image"], "() t c h w -> t () c h w")
return sample
def preprocess(self, sample: Dict[str, Any]) -> Dict[str, Any]:
"""Transform a single sample from the Dataset."""
# RGB is int32 so divide by 255
sample["image"] = sample["image"] / 255.0
sample["image"] = torch.clip(sample["image"], min=0.0, max=1.0)
if "mask" in sample:
sample["mask"] = rearrange(sample["mask"], "h w -> () h w")
return sample
def n_random_crop(self, sample: Dict[str, Any]) -> Dict[str, Any]:
"""Get n random crops."""
images, masks = [], []
for _ in range(self.num_patches_per_tile):
image, mask = sample["image"], sample["mask"]
# RandomCrop needs image and mask to be in float
mask = mask.to(torch.float)
image, mask = self.random_crop(image, mask)
images.append(image.squeeze())
masks.append(mask.squeeze(0).long())
sample["image"] = torch.stack(images) # (t,c,h,w)
sample["mask"] = torch.stack(masks) # (t, 1, h, w)
return sample
def setup(self, stage: Optional[str] = None) -> None:
"""Initialize the main ``Dataset`` objects.
This method is called once per GPU per run.
"""
train_transforms = T.Compose([self.preprocess, self.n_random_crop])
test_transforms = T.Compose([self.preprocess, self.patch_sample])
train_dataset = InriaAerialImageLabeling(
self.root_dir, split="train", transforms=train_transforms
)
self.train_dataset: Dataset[Any]
self.val_dataset: Dataset[Any]
self.test_dataset: Dataset[Any]
if self.val_split_pct > 0.0:
if self.test_split_pct > 0.0:
self.train_dataset, self.val_dataset, self.test_dataset = dataset_split(
train_dataset,
val_pct=self.val_split_pct,
test_pct=self.test_split_pct,
)
else:
self.train_dataset, self.val_dataset = dataset_split(
train_dataset, val_pct=self.val_split_pct
)
self.test_dataset = self.val_dataset
else:
self.train_dataset = train_dataset
self.val_dataset = train_dataset
self.test_dataset = train_dataset
assert self.predict_on == "test"
self.predict_dataset = InriaAerialImageLabeling(
self.root_dir, self.predict_on, transforms=test_transforms
)
def train_dataloader(self) -> DataLoader[Any]:
"""Return a DataLoader for training."""
return DataLoader(
self.train_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
collate_fn=collate_wrapper,
shuffle=True,
)
def val_dataloader(self) -> DataLoader[Any]:
"""Return a DataLoader for validation."""
return DataLoader(
self.val_dataset,
batch_size=1,
num_workers=self.num_workers,
collate_fn=collate_wrapper,
shuffle=False,
)
def test_dataloader(self) -> DataLoader[Any]:
"""Return a DataLoader for testing."""
return DataLoader(
self.test_dataset,
batch_size=1,
num_workers=self.num_workers,
collate_fn=collate_wrapper,
shuffle=False,
)
def predict_dataloader(self) -> DataLoader[Any]:
"""Return a DataLoader for prediction."""
return DataLoader(
self.predict_dataset,
batch_size=1,
num_workers=self.num_workers,
collate_fn=collate_wrapper,
shuffle=False,
)
def on_after_batch_transfer(
self, batch: Dict[str, Any], dataloader_idx: int
) -> Dict[str, Any]:
"""Apply augmentations to batch after transferring to GPU.
Args:
batch (dict): A batch of data that needs to be altered or augmented.
dataloader_idx (int): The index of the dataloader to which the batch
belongs.
Returns:
dict: A batch of data
"""
# Training
if (
hasattr(self, "trainer")
and self.trainer is not None
and hasattr(self.trainer, "training")
and self.trainer.training
and self.augmentations is not None
):
batch["mask"] = batch["mask"].to(torch.float)
batch["image"], batch["mask"] = self.augmentations(
batch["image"], batch["mask"]
)
batch["mask"] = batch["mask"].to(torch.long)
# Validation
if "mask" in batch:
batch["mask"] = rearrange(batch["mask"], "b () h w -> b h w")
return batch | torchgeo/datamodules/inria.py | from typing import Any, Dict, List, Optional, Tuple, Union, cast
import kornia.augmentation as K
import pytorch_lightning as pl
import torch
import torchvision.transforms as T
from einops import rearrange
from kornia.contrib import compute_padding, extract_tensor_patches
from torch.utils.data import DataLoader, Dataset
from torch.utils.data._utils.collate import default_collate
from ..datasets import InriaAerialImageLabeling
from ..samplers.utils import _to_tuple
from .utils import dataset_split
DEFAULT_AUGS = K.AugmentationSequential(
K.RandomHorizontalFlip(p=0.5),
K.RandomVerticalFlip(p=0.5),
data_keys=["input", "mask"],
)
def collate_wrapper(batch: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Flatten wrapper."""
r_batch: Dict[str, Any] = default_collate(batch) # type: ignore[no-untyped-call]
r_batch["image"] = torch.flatten(r_batch["image"], 0, 1)
if "mask" in r_batch:
r_batch["mask"] = torch.flatten(r_batch["mask"], 0, 1)
return r_batch
class InriaAerialImageLabelingDataModule(pl.LightningDataModule):
"""LightningDataModule implementation for the InriaAerialImageLabeling dataset.
Uses the train/test splits from the dataset and further splits
the train split into train/val splits.
.. versionadded:: 0.3
"""
h, w = 5000, 5000
def __init__(
self,
root_dir: str,
batch_size: int = 32,
num_workers: int = 0,
val_split_pct: float = 0.1,
test_split_pct: float = 0.1,
patch_size: Union[int, Tuple[int, int]] = 512,
num_patches_per_tile: int = 32,
augmentations: K.AugmentationSequential = DEFAULT_AUGS,
predict_on: str = "test",
) -> None:
"""Initialize a LightningDataModule for InriaAerialImageLabeling based DataLoaders.
Args:
root_dir: The ``root`` arugment to pass to the InriaAerialImageLabeling
Dataset classes
batch_size: The batch size used in the train DataLoader
(val_batch_size == test_batch_size == 1)
num_workers: The number of workers to use in all created DataLoaders
val_split_pct: What percentage of the dataset to use as a validation set
test_split_pct: What percentage of the dataset to use as a test set
patch_size: Size of random patch from image and mask (height, width)
num_patches_per_tile: Number of random patches per sample
augmentations: Default augmentations applied
predict_on: Directory/Dataset of images to run inference on
"""
super().__init__() # type: ignore[no-untyped-call]
self.root_dir = root_dir
self.batch_size = batch_size
self.num_workers = num_workers
self.val_split_pct = val_split_pct
self.test_split_pct = test_split_pct
self.patch_size = cast(Tuple[int, int], _to_tuple(patch_size))
self.num_patches_per_tile = num_patches_per_tile
self.augmentations = augmentations
self.predict_on = predict_on
self.random_crop = K.AugmentationSequential(
K.RandomCrop(self.patch_size, p=1.0, keepdim=False),
data_keys=["input", "mask"],
)
def patch_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
"""Extract patches from single sample."""
assert sample["image"].ndim == 3
_, h, w = sample["image"].shape
padding = compute_padding((h, w), self.patch_size)
sample["original_shape"] = (h, w)
sample["patch_shape"] = self.patch_size
sample["padding"] = padding
sample["image"] = extract_tensor_patches(
sample["image"].unsqueeze(0),
self.patch_size,
self.patch_size,
padding=padding,
)
sample["image"] = rearrange(sample["image"], "() t c h w -> t () c h w")
return sample
def preprocess(self, sample: Dict[str, Any]) -> Dict[str, Any]:
"""Transform a single sample from the Dataset."""
# RGB is int32 so divide by 255
sample["image"] = sample["image"] / 255.0
sample["image"] = torch.clip(sample["image"], min=0.0, max=1.0)
if "mask" in sample:
sample["mask"] = rearrange(sample["mask"], "h w -> () h w")
return sample
def n_random_crop(self, sample: Dict[str, Any]) -> Dict[str, Any]:
"""Get n random crops."""
images, masks = [], []
for _ in range(self.num_patches_per_tile):
image, mask = sample["image"], sample["mask"]
# RandomCrop needs image and mask to be in float
mask = mask.to(torch.float)
image, mask = self.random_crop(image, mask)
images.append(image.squeeze())
masks.append(mask.squeeze(0).long())
sample["image"] = torch.stack(images) # (t,c,h,w)
sample["mask"] = torch.stack(masks) # (t, 1, h, w)
return sample
def setup(self, stage: Optional[str] = None) -> None:
"""Initialize the main ``Dataset`` objects.
This method is called once per GPU per run.
"""
train_transforms = T.Compose([self.preprocess, self.n_random_crop])
test_transforms = T.Compose([self.preprocess, self.patch_sample])
train_dataset = InriaAerialImageLabeling(
self.root_dir, split="train", transforms=train_transforms
)
self.train_dataset: Dataset[Any]
self.val_dataset: Dataset[Any]
self.test_dataset: Dataset[Any]
if self.val_split_pct > 0.0:
if self.test_split_pct > 0.0:
self.train_dataset, self.val_dataset, self.test_dataset = dataset_split(
train_dataset,
val_pct=self.val_split_pct,
test_pct=self.test_split_pct,
)
else:
self.train_dataset, self.val_dataset = dataset_split(
train_dataset, val_pct=self.val_split_pct
)
self.test_dataset = self.val_dataset
else:
self.train_dataset = train_dataset
self.val_dataset = train_dataset
self.test_dataset = train_dataset
assert self.predict_on == "test"
self.predict_dataset = InriaAerialImageLabeling(
self.root_dir, self.predict_on, transforms=test_transforms
)
def train_dataloader(self) -> DataLoader[Any]:
"""Return a DataLoader for training."""
return DataLoader(
self.train_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
collate_fn=collate_wrapper,
shuffle=True,
)
def val_dataloader(self) -> DataLoader[Any]:
"""Return a DataLoader for validation."""
return DataLoader(
self.val_dataset,
batch_size=1,
num_workers=self.num_workers,
collate_fn=collate_wrapper,
shuffle=False,
)
def test_dataloader(self) -> DataLoader[Any]:
"""Return a DataLoader for testing."""
return DataLoader(
self.test_dataset,
batch_size=1,
num_workers=self.num_workers,
collate_fn=collate_wrapper,
shuffle=False,
)
def predict_dataloader(self) -> DataLoader[Any]:
"""Return a DataLoader for prediction."""
return DataLoader(
self.predict_dataset,
batch_size=1,
num_workers=self.num_workers,
collate_fn=collate_wrapper,
shuffle=False,
)
def on_after_batch_transfer(
self, batch: Dict[str, Any], dataloader_idx: int
) -> Dict[str, Any]:
"""Apply augmentations to batch after transferring to GPU.
Args:
batch (dict): A batch of data that needs to be altered or augmented.
dataloader_idx (int): The index of the dataloader to which the batch
belongs.
Returns:
dict: A batch of data
"""
# Training
if (
hasattr(self, "trainer")
and self.trainer is not None
and hasattr(self.trainer, "training")
and self.trainer.training
and self.augmentations is not None
):
batch["mask"] = batch["mask"].to(torch.float)
batch["image"], batch["mask"] = self.augmentations(
batch["image"], batch["mask"]
)
batch["mask"] = batch["mask"].to(torch.long)
# Validation
if "mask" in batch:
batch["mask"] = rearrange(batch["mask"], "b () h w -> b h w")
return batch | 0.962488 | 0.531027 |
__author__ = "<EMAIL> (Tim 'mithro' Ansell)"
import datetime
import json
import time
import urllib
import urllib2
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--server", help="server to register on", action="store",
default="http://localhost:8000/tracker/endpoint/register")
parser.add_argument(
"--secret", help="secret to use to register", action="store",
default="move me to config.private.json")
parser.add_argument(
"--group", help="group to register on the server", action="store",
default="example")
parser.add_argument(
"--ip", help="IP to pretend to be", action="store",
default="")
if __name__ == "__main__":
args = parser.parse_args()
while True:
data = {
"overall_clients": 0,
"overall_bitrate": 0,
"overall_cbitrate": 0,
}
totals = [('ogg_high', 4, 1e6, 2e6)]
for name, clients, bitrate, streambitrate in totals:
fixed_name = name.replace('http-', '').replace('-', '_')
data[fixed_name+"_clients"] = int(clients)
data[fixed_name+"_bitrate"] = float(bitrate)
data[fixed_name+"_cbitrate"] = float(bitrate)
data["overall_clients"] += clients
data["overall_bitrate"] += bitrate
data["overall_cbitrate"] += clients*streambitrate
for group in args.group.split(','):
try:
req = urllib2.Request(
args.server,
urllib.urlencode((
('secret', args.secret),
('group', group),
('data', json.dumps(data)),
('REMOTE_ADDR', args.ip),
)))
r = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print e
print e.read()
raise
print "Registered", group, "at", datetime.datetime.now(), "result", r.read().strip()
time.sleep(1) | tools/register/fake_register.py | __author__ = "<EMAIL> (Tim 'mithro' Ansell)"
import datetime
import json
import time
import urllib
import urllib2
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--server", help="server to register on", action="store",
default="http://localhost:8000/tracker/endpoint/register")
parser.add_argument(
"--secret", help="secret to use to register", action="store",
default="move me to config.private.json")
parser.add_argument(
"--group", help="group to register on the server", action="store",
default="example")
parser.add_argument(
"--ip", help="IP to pretend to be", action="store",
default="")
if __name__ == "__main__":
args = parser.parse_args()
while True:
data = {
"overall_clients": 0,
"overall_bitrate": 0,
"overall_cbitrate": 0,
}
totals = [('ogg_high', 4, 1e6, 2e6)]
for name, clients, bitrate, streambitrate in totals:
fixed_name = name.replace('http-', '').replace('-', '_')
data[fixed_name+"_clients"] = int(clients)
data[fixed_name+"_bitrate"] = float(bitrate)
data[fixed_name+"_cbitrate"] = float(bitrate)
data["overall_clients"] += clients
data["overall_bitrate"] += bitrate
data["overall_cbitrate"] += clients*streambitrate
for group in args.group.split(','):
try:
req = urllib2.Request(
args.server,
urllib.urlencode((
('secret', args.secret),
('group', group),
('data', json.dumps(data)),
('REMOTE_ADDR', args.ip),
)))
r = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print e
print e.read()
raise
print "Registered", group, "at", datetime.datetime.now(), "result", r.read().strip()
time.sleep(1) | 0.150465 | 0.081666 |
import os
import argparse
import torch
import rlcard
from rlcard.agents import RandomAgent
from rlcard.utils import get_device, set_seed, tournament, Logger, plot_curve
from Agent.HelperTwo import HelperAgent
from Utils import get_first, get_single, get_dead_actions, get_none_1_none, get_two_continue, get_1_gap, get_two_same, get_three_same, get_three_continue
def reshape_reward(trajectory, payoff):
''' Reorganize the trajectory to make it RL friendly
Args:
trajectory (list): A list of trajectories
payoffs (list): A list of payoffs for the players. Each entry corresponds to one player
Returns:
(list): A new trajectories that can be fed into RL algorithms.
'''
new_trajectories = [[]]
for i in range(0, len(trajectory)-2, 2):
reward = 0
# 设置是否结束
if i ==len(trajectory)-3:
done = True
else:
done = False
# 获取当前状态
current_state = trajectory[i]
good_0_actions = get_first(current_state['obs'][0])
good_1_actions = get_single(current_state['obs'][0]) + get_dead_actions(current_state) # 最好的行为:单张+死牌+听碰杠吃
good_2_actions = get_none_1_none(current_state['obs'][0]) # 次的行为:空有空
good_3_actions = get_two_continue(current_state['obs'][0]) + get_1_gap(current_state['obs'][0]) # 有空有+两个连续的
good_4_actions = get_two_same(current_state['obs'][0]) + get_three_same(
current_state['obs'][0]) + get_three_continue(
current_state['obs'][0]) # 次次行为,拆掉连续的
legal_actions = current_state['legal_actions']
legal_actions = sorted(legal_actions)
c_good_0 = []
c_good_1 = []
c_good_2 = []
c_good_3 = []
c_good_4 = []
c_others = []
c_legal_actions = []
# 计算不同的出牌等级
for j in legal_actions:
c_legal_actions.append(j)
if j in good_0_actions:
c_good_0.append(j)
elif j in good_1_actions:
c_good_1.append(j)
elif j in good_2_actions:
c_good_2.append(j)
elif j in good_3_actions:
c_good_3.append(j)
elif j in good_4_actions:
c_good_4.append(j)
else:
c_others.append(j)
action_make = trajectory[i + 1]
# 查看正确的行为在哪一层次
if action_make in c_good_0:
reward = 1
elif action_make in c_good_1:
reward = 0.8
elif action_make in c_good_2:
reward = 0.6
elif action_make in c_others:
reward = 0.4
elif action_make in c_good_3:
reward = 0.2
elif action_make in c_good_4:
reward = 0
transition = trajectory[i:i+3].copy()
transition.insert(2, reward)
transition.append(done)
new_trajectories[0].append(transition)
return new_trajectories
def save_model(dueling_agent, epoch, score):
save = {
'net': dueling_agent.q_estimator.qnet.state_dict(),
'optimizer': dueling_agent.q_estimator.optimizer.state_dict(),
'epoch': epoch
}
torch.save(save, os.path.join(os.getcwd(), 'TrainedModel/DQNwithrewardhelper', str(epoch) + '_' + str(score).replace(".", "-") + '_' + 'DQNwithrewardhelper.pth'))
def train(args):
# Check whether gpu is available
device = get_device()
# Seed numpy, torch, random
set_seed(args.seed)
# Make the environment with seed
env = rlcard.make(args.env, config={'seed': args.seed})
# Initialize the agent and use random agents as opponents
if args.algorithm == 'dqn':
from rlcard.agents import DQNAgent
agent = DQNAgent(num_actions=env.num_actions,
state_shape=env.state_shape[0],
mlp_layers=[64, 64],
learning_rate=0.00001,
device=device)
elif args.algorithm == 'nfsp':
from rlcard.agents import NFSPAgent
agent = NFSPAgent(num_actions=env.num_actions,
state_shape=env.state_shape[0],
hidden_layers_sizes=[64, 64],
q_mlp_layers=[64, 64],
device=device)
agents = [agent]
for _ in range(env.num_players - 1):
agents.append(RandomAgent(num_actions=env.num_actions))
env.set_agents(agents)
env_learn = rlcard.make(args.env, config={'seed': args.seed})
agents_learn = [HelperAgent(num_actions=env_learn.num_actions)]
for _ in range(env.num_players - 1):
agents_learn.append(RandomAgent(num_actions=env.num_actions))
env_learn.set_agents(agents_learn)
# Start training
with Logger(args.log_dir) as logger:
for episode in range(args.num_episodes):
if args.algorithm == 'nfsp':
agents[0].sample_episode_policy()
# Generate data from the environment
trajectories, payoffs = env_learn.run(is_training=True)
# Reorganaize the data to be state, action, reward, next_state, done
trajectories = reshape_reward(trajectories[0], payoffs[0])
# Feed transitions into agent memory, and train the agent
# Here, we assume that DQN always plays the first position
# and the other players play randomly (if any)
for ts in trajectories[0]:
agent.feed(ts)
# Evaluate the performance. Play with random agents.
if episode % args.evaluate_every == 0:
score = tournament(env, args.num_eval_games)[0]
save_model(agents[0], episode, score)
logger.log_performance(env.timestep, score)
# Get the paths
csv_path, fig_path = logger.csv_path, logger.fig_path
# Plot the learning curve
plot_curve(csv_path, fig_path, args.algorithm)
# Save model
save_path = os.path.join(args.log_dir, 'model.pth')
torch.save(agent, save_path)
print('TrainedModel saved in', save_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser("DQN/NFSP example in RLCard")
parser.add_argument('--env', type=str, default='mahjong',
choices=['blackjack', 'leduc-holdem', 'limit-holdem', 'doudizhu', 'mahjong', 'no-limit-holdem',
'uno', 'gin-rummy'])
parser.add_argument('--algorithm', type=str, default='dqn', choices=['dqn', 'nfsp'])
parser.add_argument('--cuda', type=str, default='')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--num_episodes', type=int, default=100000)
parser.add_argument('--num_eval_games', type=int, default=2000)
parser.add_argument('--evaluate_every', type=int, default=300)
parser.add_argument('--log_dir', type=str, default='experiments/mahjong_nfsp_result/')
args = parser.parse_args(args=[])
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
train(args) | RLCardDQN.py | import os
import argparse
import torch
import rlcard
from rlcard.agents import RandomAgent
from rlcard.utils import get_device, set_seed, tournament, Logger, plot_curve
from Agent.HelperTwo import HelperAgent
from Utils import get_first, get_single, get_dead_actions, get_none_1_none, get_two_continue, get_1_gap, get_two_same, get_three_same, get_three_continue
def reshape_reward(trajectory, payoff):
''' Reorganize the trajectory to make it RL friendly
Args:
trajectory (list): A list of trajectories
payoffs (list): A list of payoffs for the players. Each entry corresponds to one player
Returns:
(list): A new trajectories that can be fed into RL algorithms.
'''
new_trajectories = [[]]
for i in range(0, len(trajectory)-2, 2):
reward = 0
# 设置是否结束
if i ==len(trajectory)-3:
done = True
else:
done = False
# 获取当前状态
current_state = trajectory[i]
good_0_actions = get_first(current_state['obs'][0])
good_1_actions = get_single(current_state['obs'][0]) + get_dead_actions(current_state) # 最好的行为:单张+死牌+听碰杠吃
good_2_actions = get_none_1_none(current_state['obs'][0]) # 次的行为:空有空
good_3_actions = get_two_continue(current_state['obs'][0]) + get_1_gap(current_state['obs'][0]) # 有空有+两个连续的
good_4_actions = get_two_same(current_state['obs'][0]) + get_three_same(
current_state['obs'][0]) + get_three_continue(
current_state['obs'][0]) # 次次行为,拆掉连续的
legal_actions = current_state['legal_actions']
legal_actions = sorted(legal_actions)
c_good_0 = []
c_good_1 = []
c_good_2 = []
c_good_3 = []
c_good_4 = []
c_others = []
c_legal_actions = []
# 计算不同的出牌等级
for j in legal_actions:
c_legal_actions.append(j)
if j in good_0_actions:
c_good_0.append(j)
elif j in good_1_actions:
c_good_1.append(j)
elif j in good_2_actions:
c_good_2.append(j)
elif j in good_3_actions:
c_good_3.append(j)
elif j in good_4_actions:
c_good_4.append(j)
else:
c_others.append(j)
action_make = trajectory[i + 1]
# 查看正确的行为在哪一层次
if action_make in c_good_0:
reward = 1
elif action_make in c_good_1:
reward = 0.8
elif action_make in c_good_2:
reward = 0.6
elif action_make in c_others:
reward = 0.4
elif action_make in c_good_3:
reward = 0.2
elif action_make in c_good_4:
reward = 0
transition = trajectory[i:i+3].copy()
transition.insert(2, reward)
transition.append(done)
new_trajectories[0].append(transition)
return new_trajectories
def save_model(dueling_agent, epoch, score):
save = {
'net': dueling_agent.q_estimator.qnet.state_dict(),
'optimizer': dueling_agent.q_estimator.optimizer.state_dict(),
'epoch': epoch
}
torch.save(save, os.path.join(os.getcwd(), 'TrainedModel/DQNwithrewardhelper', str(epoch) + '_' + str(score).replace(".", "-") + '_' + 'DQNwithrewardhelper.pth'))
def train(args):
# Check whether gpu is available
device = get_device()
# Seed numpy, torch, random
set_seed(args.seed)
# Make the environment with seed
env = rlcard.make(args.env, config={'seed': args.seed})
# Initialize the agent and use random agents as opponents
if args.algorithm == 'dqn':
from rlcard.agents import DQNAgent
agent = DQNAgent(num_actions=env.num_actions,
state_shape=env.state_shape[0],
mlp_layers=[64, 64],
learning_rate=0.00001,
device=device)
elif args.algorithm == 'nfsp':
from rlcard.agents import NFSPAgent
agent = NFSPAgent(num_actions=env.num_actions,
state_shape=env.state_shape[0],
hidden_layers_sizes=[64, 64],
q_mlp_layers=[64, 64],
device=device)
agents = [agent]
for _ in range(env.num_players - 1):
agents.append(RandomAgent(num_actions=env.num_actions))
env.set_agents(agents)
env_learn = rlcard.make(args.env, config={'seed': args.seed})
agents_learn = [HelperAgent(num_actions=env_learn.num_actions)]
for _ in range(env.num_players - 1):
agents_learn.append(RandomAgent(num_actions=env.num_actions))
env_learn.set_agents(agents_learn)
# Start training
with Logger(args.log_dir) as logger:
for episode in range(args.num_episodes):
if args.algorithm == 'nfsp':
agents[0].sample_episode_policy()
# Generate data from the environment
trajectories, payoffs = env_learn.run(is_training=True)
# Reorganaize the data to be state, action, reward, next_state, done
trajectories = reshape_reward(trajectories[0], payoffs[0])
# Feed transitions into agent memory, and train the agent
# Here, we assume that DQN always plays the first position
# and the other players play randomly (if any)
for ts in trajectories[0]:
agent.feed(ts)
# Evaluate the performance. Play with random agents.
if episode % args.evaluate_every == 0:
score = tournament(env, args.num_eval_games)[0]
save_model(agents[0], episode, score)
logger.log_performance(env.timestep, score)
# Get the paths
csv_path, fig_path = logger.csv_path, logger.fig_path
# Plot the learning curve
plot_curve(csv_path, fig_path, args.algorithm)
# Save model
save_path = os.path.join(args.log_dir, 'model.pth')
torch.save(agent, save_path)
print('TrainedModel saved in', save_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser("DQN/NFSP example in RLCard")
parser.add_argument('--env', type=str, default='mahjong',
choices=['blackjack', 'leduc-holdem', 'limit-holdem', 'doudizhu', 'mahjong', 'no-limit-holdem',
'uno', 'gin-rummy'])
parser.add_argument('--algorithm', type=str, default='dqn', choices=['dqn', 'nfsp'])
parser.add_argument('--cuda', type=str, default='')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--num_episodes', type=int, default=100000)
parser.add_argument('--num_eval_games', type=int, default=2000)
parser.add_argument('--evaluate_every', type=int, default=300)
parser.add_argument('--log_dir', type=str, default='experiments/mahjong_nfsp_result/')
args = parser.parse_args(args=[])
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
train(args) | 0.550124 | 0.234275 |
import argparse
from colorama import Fore
import csv
import psycopg2
import psycopg2.extras
import re
import signal
import sys
def main():
# parse the command line arguments
parser = argparse.ArgumentParser(
description="Add ORCID identifiers to items for a given author name from CSV. Respects the author order from the dc.contributor.author field."
)
parser.add_argument(
"--author-field-name",
"-f",
help="Name of column with author names.",
default="dc.contributor.author",
)
parser.add_argument(
"--csv-file",
"-i",
help="CSV file containing author names and ORCID identifiers.",
required=True,
type=argparse.FileType("r", encoding="UTF-8"),
)
parser.add_argument("--database-name", "-db", help="Database name", required=True)
parser.add_argument(
"--database-user", "-u", help="Database username", required=True
)
parser.add_argument(
"--database-pass", "-p", help="Database password", required=True
)
parser.add_argument(
"--debug",
"-d",
help="Print debug messages to standard error (stderr).",
action="store_true",
)
parser.add_argument(
"--dry-run",
"-n",
help="Only print changes that would be made.",
action="store_true",
)
parser.add_argument(
"--orcid-field-name",
"-o",
help='Name of column with creators in "Name: 0000-0000-0000-0000" format.',
default="cg.creator.identifier",
)
args = parser.parse_args()
# set the signal handler for SIGINT (^C) so we can exit cleanly
signal.signal(signal.SIGINT, signal_handler)
# connect to database
try:
conn_string = "dbname={0} user={1} password={2} host=localhost".format(
args.database_name, args.database_user, args.database_pass
)
conn = psycopg2.connect(conn_string)
if args.debug:
sys.stderr.write(Fore.GREEN + "Connected to the database.\n" + Fore.RESET)
except psycopg2.OperationalError:
sys.stderr.write(Fore.RED + "Unable to connect to the database.\n" + Fore.RESET)
# close output file before we exit
args.csv_file.close()
exit(1)
# open the CSV
reader = csv.DictReader(args.csv_file)
# iterate over rows in the CSV
for row in reader:
author_name = row[args.author_field_name]
if args.debug:
sys.stderr.write(
Fore.GREEN
+ "Finding items with author name: {0}\n".format(author_name)
+ Fore.RESET
)
with conn:
# cursor will be closed after this block exits
# see: http://initd.org/psycopg/docs/usage.html#with-statement
with conn.cursor() as cursor:
# find all item metadata records with this author name
# metadata_field_id 3 is author
sql = "SELECT dspace_object_id, place FROM metadatavalue WHERE dspace_object_id IN (SELECT uuid FROM item) AND metadata_field_id=3 AND text_value=%s"
# remember that tuples with one item need a comma after them!
cursor.execute(sql, (author_name,))
records_with_author_name = cursor.fetchall()
if len(records_with_author_name) >= 0:
if args.debug:
sys.stderr.write(
Fore.GREEN
+ "Found {0} items.\n".format(len(records_with_author_name))
+ Fore.RESET
)
# extract cg.creator.identifier text to add from CSV and strip leading/trailing whitespace
text_value = row[args.orcid_field_name].strip()
# extract the ORCID identifier from the cg.creator.identifier text field in the CSV
orcid_identifier_pattern = re.compile(
r"[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}"
)
orcid_identifier_match = orcid_identifier_pattern.search(text_value)
# sanity check to make sure we extracted the ORCID identifier from the cg.creator.identifier text in the CSV
if orcid_identifier_match is None:
if args.debug:
sys.stderr.write(
Fore.YELLOW
+ 'Skipping invalid ORCID identifier in "{0}".\n'.format(
text_value
)
+ Fore.RESET
)
continue
# we only expect one ORCID identifier, so if it matches it will be group "0"
# see: https://docs.python.org/3/library/re.html
orcid_identifier = orcid_identifier_match.group(0)
# iterate over results for current author name to add cg.creator.identifier metadata
for record in records_with_author_name:
dspace_object_id = record[0]
# "place" is the order of a metadata value so we can add the cg.creator.identifier metadata matching the author order
place = record[1]
confidence = -1
# get the metadata_field_id for the cg.creator.identifier field
sql = "SELECT metadata_field_id FROM metadatafieldregistry WHERE metadata_schema_id=2 AND element='creator' AND qualifier='identifier'"
cursor.execute(sql)
metadata_field_id = cursor.fetchall()[0]
# check if there is an existing cg.creator.identifier with this author's ORCID identifier for this item (without restricting the "place")
# note that the SQL here is quoted differently to allow us to use LIKE with % wildcards with our paremeter subsitution
sql = "SELECT * from metadatavalue WHERE dspace_object_id=%s AND metadata_field_id=%s AND text_value LIKE '%%' || %s || '%%' AND confidence=%s AND dspace_object_id IN (SELECT uuid FROM item)"
# Adapt Python’s uuid.UUID type to PostgreSQL's uuid
# See: https://www.psycopg.org/docs/extras.html
psycopg2.extras.register_uuid()
cursor.execute(
sql,
(
dspace_object_id,
metadata_field_id,
orcid_identifier,
confidence,
),
)
records_with_orcid_identifier = cursor.fetchall()
if len(records_with_orcid_identifier) == 0:
if args.dry_run:
print(
'Would add ORCID identifier "{0}" to item {1}.'.format(
text_value, dspace_object_id
)
)
continue
print(
'Adding ORCID identifier "{0}" to item {1}.'.format(
text_value, dspace_object_id
)
)
# metadatavalue IDs come from a PostgreSQL sequence that increments when you call it
cursor.execute("SELECT nextval('metadatavalue_seq')")
metadata_value_id = cursor.fetchone()[0]
sql = "INSERT INTO metadatavalue (metadata_value_id, dspace_object_id, metadata_field_id, text_value, place, confidence) VALUES (%s, %s, %s, %s, %s, %s)"
cursor.execute(
sql,
(
metadata_value_id,
dspace_object_id,
metadata_field_id,
text_value,
place,
confidence,
),
)
else:
if args.debug:
sys.stderr.write(
Fore.GREEN
+ "Item {0} already has an ORCID identifier for {1}.\n".format(
dspace_object_id, text_value
)
+ Fore.RESET
)
if args.debug:
sys.stderr.write(Fore.GREEN + "Disconnecting from database.\n" + Fore.RESET)
# close the database connection before leaving
conn.close()
# close output file before we exit
args.csv_file.close()
def signal_handler(signal, frame):
sys.exit(1)
if __name__ == "__main__":
main() | ilri/add-orcid-identifiers-csv.py |
import argparse
from colorama import Fore
import csv
import psycopg2
import psycopg2.extras
import re
import signal
import sys
def main():
# parse the command line arguments
parser = argparse.ArgumentParser(
description="Add ORCID identifiers to items for a given author name from CSV. Respects the author order from the dc.contributor.author field."
)
parser.add_argument(
"--author-field-name",
"-f",
help="Name of column with author names.",
default="dc.contributor.author",
)
parser.add_argument(
"--csv-file",
"-i",
help="CSV file containing author names and ORCID identifiers.",
required=True,
type=argparse.FileType("r", encoding="UTF-8"),
)
parser.add_argument("--database-name", "-db", help="Database name", required=True)
parser.add_argument(
"--database-user", "-u", help="Database username", required=True
)
parser.add_argument(
"--database-pass", "-p", help="Database password", required=True
)
parser.add_argument(
"--debug",
"-d",
help="Print debug messages to standard error (stderr).",
action="store_true",
)
parser.add_argument(
"--dry-run",
"-n",
help="Only print changes that would be made.",
action="store_true",
)
parser.add_argument(
"--orcid-field-name",
"-o",
help='Name of column with creators in "Name: 0000-0000-0000-0000" format.',
default="cg.creator.identifier",
)
args = parser.parse_args()
# set the signal handler for SIGINT (^C) so we can exit cleanly
signal.signal(signal.SIGINT, signal_handler)
# connect to database
try:
conn_string = "dbname={0} user={1} password={2} host=localhost".format(
args.database_name, args.database_user, args.database_pass
)
conn = psycopg2.connect(conn_string)
if args.debug:
sys.stderr.write(Fore.GREEN + "Connected to the database.\n" + Fore.RESET)
except psycopg2.OperationalError:
sys.stderr.write(Fore.RED + "Unable to connect to the database.\n" + Fore.RESET)
# close output file before we exit
args.csv_file.close()
exit(1)
# open the CSV
reader = csv.DictReader(args.csv_file)
# iterate over rows in the CSV
for row in reader:
author_name = row[args.author_field_name]
if args.debug:
sys.stderr.write(
Fore.GREEN
+ "Finding items with author name: {0}\n".format(author_name)
+ Fore.RESET
)
with conn:
# cursor will be closed after this block exits
# see: http://initd.org/psycopg/docs/usage.html#with-statement
with conn.cursor() as cursor:
# find all item metadata records with this author name
# metadata_field_id 3 is author
sql = "SELECT dspace_object_id, place FROM metadatavalue WHERE dspace_object_id IN (SELECT uuid FROM item) AND metadata_field_id=3 AND text_value=%s"
# remember that tuples with one item need a comma after them!
cursor.execute(sql, (author_name,))
records_with_author_name = cursor.fetchall()
if len(records_with_author_name) >= 0:
if args.debug:
sys.stderr.write(
Fore.GREEN
+ "Found {0} items.\n".format(len(records_with_author_name))
+ Fore.RESET
)
# extract cg.creator.identifier text to add from CSV and strip leading/trailing whitespace
text_value = row[args.orcid_field_name].strip()
# extract the ORCID identifier from the cg.creator.identifier text field in the CSV
orcid_identifier_pattern = re.compile(
r"[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}"
)
orcid_identifier_match = orcid_identifier_pattern.search(text_value)
# sanity check to make sure we extracted the ORCID identifier from the cg.creator.identifier text in the CSV
if orcid_identifier_match is None:
if args.debug:
sys.stderr.write(
Fore.YELLOW
+ 'Skipping invalid ORCID identifier in "{0}".\n'.format(
text_value
)
+ Fore.RESET
)
continue
# we only expect one ORCID identifier, so if it matches it will be group "0"
# see: https://docs.python.org/3/library/re.html
orcid_identifier = orcid_identifier_match.group(0)
# iterate over results for current author name to add cg.creator.identifier metadata
for record in records_with_author_name:
dspace_object_id = record[0]
# "place" is the order of a metadata value so we can add the cg.creator.identifier metadata matching the author order
place = record[1]
confidence = -1
# get the metadata_field_id for the cg.creator.identifier field
sql = "SELECT metadata_field_id FROM metadatafieldregistry WHERE metadata_schema_id=2 AND element='creator' AND qualifier='identifier'"
cursor.execute(sql)
metadata_field_id = cursor.fetchall()[0]
# check if there is an existing cg.creator.identifier with this author's ORCID identifier for this item (without restricting the "place")
# note that the SQL here is quoted differently to allow us to use LIKE with % wildcards with our paremeter subsitution
sql = "SELECT * from metadatavalue WHERE dspace_object_id=%s AND metadata_field_id=%s AND text_value LIKE '%%' || %s || '%%' AND confidence=%s AND dspace_object_id IN (SELECT uuid FROM item)"
# Adapt Python’s uuid.UUID type to PostgreSQL's uuid
# See: https://www.psycopg.org/docs/extras.html
psycopg2.extras.register_uuid()
cursor.execute(
sql,
(
dspace_object_id,
metadata_field_id,
orcid_identifier,
confidence,
),
)
records_with_orcid_identifier = cursor.fetchall()
if len(records_with_orcid_identifier) == 0:
if args.dry_run:
print(
'Would add ORCID identifier "{0}" to item {1}.'.format(
text_value, dspace_object_id
)
)
continue
print(
'Adding ORCID identifier "{0}" to item {1}.'.format(
text_value, dspace_object_id
)
)
# metadatavalue IDs come from a PostgreSQL sequence that increments when you call it
cursor.execute("SELECT nextval('metadatavalue_seq')")
metadata_value_id = cursor.fetchone()[0]
sql = "INSERT INTO metadatavalue (metadata_value_id, dspace_object_id, metadata_field_id, text_value, place, confidence) VALUES (%s, %s, %s, %s, %s, %s)"
cursor.execute(
sql,
(
metadata_value_id,
dspace_object_id,
metadata_field_id,
text_value,
place,
confidence,
),
)
else:
if args.debug:
sys.stderr.write(
Fore.GREEN
+ "Item {0} already has an ORCID identifier for {1}.\n".format(
dspace_object_id, text_value
)
+ Fore.RESET
)
if args.debug:
sys.stderr.write(Fore.GREEN + "Disconnecting from database.\n" + Fore.RESET)
# close the database connection before leaving
conn.close()
# close output file before we exit
args.csv_file.close()
def signal_handler(signal, frame):
sys.exit(1)
if __name__ == "__main__":
main() | 0.380874 | 0.117623 |
from MoinMoin.script import MoinScript
class PluginScript(MoinScript):
"""\
Purpose:
========
This tool allows you to disable user accounts via a command line interface.
Detailed Instructions:
======================
General syntax: moin [options] account disable [disable-options]
[options] usually should be:
--config-dir=/path/to/my/cfg/ --wiki-url=http://wiki.example.org/
[disable-options] see below:
0. Verify that you really want to disable the account.
While there is a disable script, no such enable script exists.
1. To disable the user 'JohnSmith':
moin ... account disable --name JohnSmith
2. To disable the user 'JohnSmith', based on his UID '1198872910.78.56322':
moin ... account disable --uid 1198872910.78.56322
"""
def __init__(self, argv, def_values):
MoinScript.__init__(self, argv, def_values)
self.parser.add_option(
"--uid", metavar="UID", dest="uid",
help="Disable the user with user id UID."
)
self.parser.add_option(
"--name", metavar="NAME", dest="uname",
help="Disable the user with user name NAME."
)
def mainloop(self):
# we don't expect non-option arguments
if len(self.args) != 0:
self.parser.error("incorrect number of arguments")
flags_given = self.options.uid or self.options.uname
if not flags_given:
self.parser.print_help()
import sys
sys.exit(1)
self.init_request()
request = self.request
from MoinMoin import user
if self.options.uid:
u = user.User(request, self.options.uid)
elif self.options.uname:
u = user.User(request, None, self.options.uname)
if not u.exists():
print 'This user "%s" does not exists!' % u.name
return
print " %-20s %-25s %-35s" % (u.id, u.name, u.email),
if not u.disabled: # only disable once
u.disabled = 1
u.name = "%s-%s" % (u.name, u.id)
if u.email:
u.email = "%s-%s" % (u.email, u.id)
u.subscribed_pages = "" # avoid using email
u.save()
print "- disabled."
else:
print "- is already disabled." | MoinMoin/script/account/disable.py | from MoinMoin.script import MoinScript
class PluginScript(MoinScript):
"""\
Purpose:
========
This tool allows you to disable user accounts via a command line interface.
Detailed Instructions:
======================
General syntax: moin [options] account disable [disable-options]
[options] usually should be:
--config-dir=/path/to/my/cfg/ --wiki-url=http://wiki.example.org/
[disable-options] see below:
0. Verify that you really want to disable the account.
While there is a disable script, no such enable script exists.
1. To disable the user 'JohnSmith':
moin ... account disable --name JohnSmith
2. To disable the user 'JohnSmith', based on his UID '1198872910.78.56322':
moin ... account disable --uid 1198872910.78.56322
"""
def __init__(self, argv, def_values):
MoinScript.__init__(self, argv, def_values)
self.parser.add_option(
"--uid", metavar="UID", dest="uid",
help="Disable the user with user id UID."
)
self.parser.add_option(
"--name", metavar="NAME", dest="uname",
help="Disable the user with user name NAME."
)
def mainloop(self):
# we don't expect non-option arguments
if len(self.args) != 0:
self.parser.error("incorrect number of arguments")
flags_given = self.options.uid or self.options.uname
if not flags_given:
self.parser.print_help()
import sys
sys.exit(1)
self.init_request()
request = self.request
from MoinMoin import user
if self.options.uid:
u = user.User(request, self.options.uid)
elif self.options.uname:
u = user.User(request, None, self.options.uname)
if not u.exists():
print 'This user "%s" does not exists!' % u.name
return
print " %-20s %-25s %-35s" % (u.id, u.name, u.email),
if not u.disabled: # only disable once
u.disabled = 1
u.name = "%s-%s" % (u.name, u.id)
if u.email:
u.email = "%s-%s" % (u.email, u.id)
u.subscribed_pages = "" # avoid using email
u.save()
print "- disabled."
else:
print "- is already disabled." | 0.53437 | 0.113973 |
import torch
import torch.nn as nn
from hyper import cfg
class Conv2dAuto(nn.Conv2d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.padding = (self.kernel_size[0] // 2, self.kernel_size[1] // 2)
def activation_func(activation):
return nn.ModuleDict([
['relu', nn.ReLU(inplace=True)],
['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)],
['selu', nn.SELU(inplace=True)],
['none', nn.Identity()]
])[activation]
class CNNet(nn.Module):
def __init__(self, in_dim = 32, in_chans = 3, filters = [32, 32, 64, 128, 1], kernels = [5, 3, 3, 3, 1], dim_s = [1,1,2,2,1], activation = "selu", drop_p = 0.1):
super().__init__()
self.convs = nn.ModuleList([Conv2dAuto(in_channels = in_chans, out_channels = filters[0], stride = dim_s[0], kernel_size = kernels[0])] +
[Conv2dAuto(in_channels = filters[i-1], out_channels = filters[i], stride = dim_s[i], kernel_size = kernels[i]) for i in range(1, len(filters))])
self.batch_norms = nn.ModuleList([nn.BatchNorm2d(num_features= filters[i]) for i in range(len(filters))])
self.setup(torch.rand(1,in_chans, in_dim, in_dim))
self.denses = nn.ModuleList([nn.Linear(in_features = self.lin_in_shape, out_features=128),
nn.Linear(in_features = 128, out_features=4)])
self.act = activation_func(activation)
self.drop = nn.Dropout(p=drop_p)
def setup(self, x):
for l in self.convs:
x = l(x)
self.lin_in_shape = x.shape[-1]*x.shape[-2]*x.shape[-3]
def forward(self, x):
for l, b in zip(self.convs, self.batch_norms):
x = self.drop(self.act(b(l(x))))
x = x.view(-1, self.lin_in_shape)
x = self.denses[0](x)
x = self.act(x)
x = self.drop(x)
x = self.denses[1](x)
return x
class FCNet(nn.Module):
def __init__(self, in_dim = 32, in_chans = None, layer_dims = [128, 64, 4], activation = "selu", drop_p = 0.1):
super().__init__()
self.layer_dims = [in_dim] + layer_dims
self.denses = nn.ModuleList([nn.Linear(in_features = self.layer_dims[i], out_features = self.layer_dims[i+1]) for i in range(len(layer_dims))])
self.bns = nn.ModuleList([nn.BatchNorm1d(num_features= self.layer_dims[i + 1]) for i in range(len(layer_dims) - 1)])
self.act = activation_func(activation)
self.drop = nn.Dropout(p=drop_p)
def forward(self, x):
for i, l in enumerate(self.denses):
x = l(x)
if not i == len(self.denses) - 1:
x = self.bns[i](x)
x = self.act(x)
if not i == len(self.denses) - 1:
x = self.drop(x)
return x
nets = {"cnn" : CNNet, "fcn" : FCNet} | models.py | import torch
import torch.nn as nn
from hyper import cfg
class Conv2dAuto(nn.Conv2d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.padding = (self.kernel_size[0] // 2, self.kernel_size[1] // 2)
def activation_func(activation):
return nn.ModuleDict([
['relu', nn.ReLU(inplace=True)],
['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)],
['selu', nn.SELU(inplace=True)],
['none', nn.Identity()]
])[activation]
class CNNet(nn.Module):
def __init__(self, in_dim = 32, in_chans = 3, filters = [32, 32, 64, 128, 1], kernels = [5, 3, 3, 3, 1], dim_s = [1,1,2,2,1], activation = "selu", drop_p = 0.1):
super().__init__()
self.convs = nn.ModuleList([Conv2dAuto(in_channels = in_chans, out_channels = filters[0], stride = dim_s[0], kernel_size = kernels[0])] +
[Conv2dAuto(in_channels = filters[i-1], out_channels = filters[i], stride = dim_s[i], kernel_size = kernels[i]) for i in range(1, len(filters))])
self.batch_norms = nn.ModuleList([nn.BatchNorm2d(num_features= filters[i]) for i in range(len(filters))])
self.setup(torch.rand(1,in_chans, in_dim, in_dim))
self.denses = nn.ModuleList([nn.Linear(in_features = self.lin_in_shape, out_features=128),
nn.Linear(in_features = 128, out_features=4)])
self.act = activation_func(activation)
self.drop = nn.Dropout(p=drop_p)
def setup(self, x):
for l in self.convs:
x = l(x)
self.lin_in_shape = x.shape[-1]*x.shape[-2]*x.shape[-3]
def forward(self, x):
for l, b in zip(self.convs, self.batch_norms):
x = self.drop(self.act(b(l(x))))
x = x.view(-1, self.lin_in_shape)
x = self.denses[0](x)
x = self.act(x)
x = self.drop(x)
x = self.denses[1](x)
return x
class FCNet(nn.Module):
def __init__(self, in_dim = 32, in_chans = None, layer_dims = [128, 64, 4], activation = "selu", drop_p = 0.1):
super().__init__()
self.layer_dims = [in_dim] + layer_dims
self.denses = nn.ModuleList([nn.Linear(in_features = self.layer_dims[i], out_features = self.layer_dims[i+1]) for i in range(len(layer_dims))])
self.bns = nn.ModuleList([nn.BatchNorm1d(num_features= self.layer_dims[i + 1]) for i in range(len(layer_dims) - 1)])
self.act = activation_func(activation)
self.drop = nn.Dropout(p=drop_p)
def forward(self, x):
for i, l in enumerate(self.denses):
x = l(x)
if not i == len(self.denses) - 1:
x = self.bns[i](x)
x = self.act(x)
if not i == len(self.denses) - 1:
x = self.drop(x)
return x
nets = {"cnn" : CNNet, "fcn" : FCNet} | 0.923135 | 0.47524 |
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, Xilinx"
__email__ = "<EMAIL>"
from setuptools import setup, find_packages, Distribution
from setuptools.command.build_ext import build_ext
from distutils.file_util import copy_file
import os
# Requirement
required = [
'pynq>=2.6.0'
]
# Extend package files by directory or by file
def extend_package(data_list):
for data in data_list:
if os.path.isdir(data):
package_files.extend(
[os.path.join("..", root, f)
for root, _, files in os.walk(data) for f in files]
)
elif os.path.isfile(data):
package_files.append(os.path.join("..", data))
# Extend package files
package_files = []
extend_package(
["embeddedsw/XilinxProcessorIPLib/drivers/v_hdmi_common/src",
"embeddedsw/XilinxProcessorIPLib/drivers/v_hdmirxss/src",
"embeddedsw/XilinxProcessorIPLib/drivers/v_hdmirx/src",
"embeddedsw/XilinxProcessorIPLib/drivers/v_hdmitxss/src",
"embeddedsw/XilinxProcessorIPLib/drivers/v_hdmitx/src",
"embeddedsw/XilinxProcessorIPLib/drivers/video_common/src",
"embeddedsw/XilinxProcessorIPLib/drivers/vphy/src",
"embeddedsw/XilinxProcessorIPLib/drivers/vtc/src",
"embeddedsw/XilinxProcessorIPLib/drivers/iic/src",
"embeddedsw/XilinxProcessorIPLib/drivers/gpio/src",
"embeddedsw/XilinxProcessorIPLib/drivers/iicps/src",
"embeddedsw/XilinxProcessorIPLib/drivers/scugic/src",
"embeddedsw/XilinxProcessorIPLib/drivers/axivdma/src",
"embeddedsw/XilinxProcessorIPLib/drivers/mipicsiss/src",
"embeddedsw/XilinxProcessorIPLib/drivers/csi/src",
"embeddedsw/XilinxProcessorIPLib/drivers/dphy/src",
"embeddedsw/lib/bsp/standalone/src",
"embeddedsw_lib.mk",
"common",
"_pcam5c",
'pcam5c'
])
class BuildExtension(build_ext):
def run_make(self, src_path, dst_path, output_lib):
self.spawn(['make', 'PYNQ_BUILD_ARCH={}'.format("aarch64"),
'-C', src_path])
os.makedirs(os.path.join(self.build_lib, dst_path), exist_ok=True)
copy_file(src_path + output_lib,
os.path.join(self.build_lib, dst_path, output_lib))
def run(self):
self.run_make("_pcam5c/", "pcam5c", "libpcam5c.so")
build_ext.run(self)
# Enforce platform-dependent distribution
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
setup(name='pynq_zu_pcam5c',
version="1.0",
description='pcam5 driver for Pynq-ZU',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/Xilinx/PYNQ-ZU',
packages=find_packages(),
cmdclass={
"build_ext": BuildExtension,
},
distclass=BinaryDistribution,
python_requires='>=3.6.0',
install_requires=required,
package_data={
'pcam5c': package_files,
},
entry_points={
"pynq.lib": "pcam5c = pcam5c"
},
ext_modules=[],
zip_safe=False,
license="BSD 3-Clause"
) | Pynq-ZU/packages/pcam5c/setup.py |
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, Xilinx"
__email__ = "<EMAIL>"
from setuptools import setup, find_packages, Distribution
from setuptools.command.build_ext import build_ext
from distutils.file_util import copy_file
import os
# Requirement
required = [
'pynq>=2.6.0'
]
# Extend package files by directory or by file
def extend_package(data_list):
for data in data_list:
if os.path.isdir(data):
package_files.extend(
[os.path.join("..", root, f)
for root, _, files in os.walk(data) for f in files]
)
elif os.path.isfile(data):
package_files.append(os.path.join("..", data))
# Extend package files
package_files = []
extend_package(
["embeddedsw/XilinxProcessorIPLib/drivers/v_hdmi_common/src",
"embeddedsw/XilinxProcessorIPLib/drivers/v_hdmirxss/src",
"embeddedsw/XilinxProcessorIPLib/drivers/v_hdmirx/src",
"embeddedsw/XilinxProcessorIPLib/drivers/v_hdmitxss/src",
"embeddedsw/XilinxProcessorIPLib/drivers/v_hdmitx/src",
"embeddedsw/XilinxProcessorIPLib/drivers/video_common/src",
"embeddedsw/XilinxProcessorIPLib/drivers/vphy/src",
"embeddedsw/XilinxProcessorIPLib/drivers/vtc/src",
"embeddedsw/XilinxProcessorIPLib/drivers/iic/src",
"embeddedsw/XilinxProcessorIPLib/drivers/gpio/src",
"embeddedsw/XilinxProcessorIPLib/drivers/iicps/src",
"embeddedsw/XilinxProcessorIPLib/drivers/scugic/src",
"embeddedsw/XilinxProcessorIPLib/drivers/axivdma/src",
"embeddedsw/XilinxProcessorIPLib/drivers/mipicsiss/src",
"embeddedsw/XilinxProcessorIPLib/drivers/csi/src",
"embeddedsw/XilinxProcessorIPLib/drivers/dphy/src",
"embeddedsw/lib/bsp/standalone/src",
"embeddedsw_lib.mk",
"common",
"_pcam5c",
'pcam5c'
])
class BuildExtension(build_ext):
def run_make(self, src_path, dst_path, output_lib):
self.spawn(['make', 'PYNQ_BUILD_ARCH={}'.format("aarch64"),
'-C', src_path])
os.makedirs(os.path.join(self.build_lib, dst_path), exist_ok=True)
copy_file(src_path + output_lib,
os.path.join(self.build_lib, dst_path, output_lib))
def run(self):
self.run_make("_pcam5c/", "pcam5c", "libpcam5c.so")
build_ext.run(self)
# Enforce platform-dependent distribution
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
setup(name='pynq_zu_pcam5c',
version="1.0",
description='pcam5 driver for Pynq-ZU',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/Xilinx/PYNQ-ZU',
packages=find_packages(),
cmdclass={
"build_ext": BuildExtension,
},
distclass=BinaryDistribution,
python_requires='>=3.6.0',
install_requires=required,
package_data={
'pcam5c': package_files,
},
entry_points={
"pynq.lib": "pcam5c = pcam5c"
},
ext_modules=[],
zip_safe=False,
license="BSD 3-Clause"
) | 0.412175 | 0.067393 |
from typing import List
from .device import DeviceModel
from .gate import GateModel
from .room import RoomModel
from .sampleData import SampleDataModel
from .reciveData import ReciveDataModel
import json
import time
import copy
#Recive data format
#{"Gate":"BRAMKA2","Device":"DEVICE1","RSSI":-27}
class TestDataModel:
def __init__(self, dev: DeviceModel, gates: List[GateModel], room: RoomModel, samples=[]):
self.recive_data = None
self.device = dev
self.gates = gates
self.room = room
self.sample_data_list = samples
if len(samples) == 0:
self.addStartSamples()
def addSample(self, data):
try:
self.recive_data = ReciveDataModel(data["Gate"], data["Device"], data["RSSI"], time.strftime("%Y%m%d%H%M%S"))
except Exception as e:
print(e)
return
# Zwracanie id bramki w z której są dane
number_gate = 0
for g in self.gates:
if g.name == self.recive_data.gate_name:
break
else:
number_gate += 1
if number_gate > len(self.gates):
print("Not search gate {} in list {}".format(g.name, self.gates))
return
last_sample = copy.deepcopy(self.sample_data_list[-1])
last_sample.samples[number_gate] = self.recive_data.rssi
actual_sample = SampleDataModel(self.recive_data.time, last_sample.samples)
self.sample_data_list.append(actual_sample)
def getJson(self):
return {"Room":self.room.getJsonModel(),
"Device":self.device.getJson(),
"Gates": [g.getJson() for g in self.gates],
"Samples":[s.getJson() for s in self.sample_data_list]}
def getJsonText(self):
return json.dumps(self.getJson())
def addStartSamples(self):
iter_gates = len(self.gates)
for i in range(iter_gates):
sample = SampleDataModel(time.strftime("%Y%m%d%H%M%S"), [0 for x in range(iter_gates)])
self.sample_data_list.append(sample)
#Properties
@property
def gates(self):
return self._gates
@gates.setter
def gates(self, g):
if len(g) < 3:
raise Exception('To few gates')
else:
self._gates = g
def __str__(self):
return self.getJsonText() | src/db_interface/testData.py | from typing import List
from .device import DeviceModel
from .gate import GateModel
from .room import RoomModel
from .sampleData import SampleDataModel
from .reciveData import ReciveDataModel
import json
import time
import copy
#Recive data format
#{"Gate":"BRAMKA2","Device":"DEVICE1","RSSI":-27}
class TestDataModel:
def __init__(self, dev: DeviceModel, gates: List[GateModel], room: RoomModel, samples=[]):
self.recive_data = None
self.device = dev
self.gates = gates
self.room = room
self.sample_data_list = samples
if len(samples) == 0:
self.addStartSamples()
def addSample(self, data):
try:
self.recive_data = ReciveDataModel(data["Gate"], data["Device"], data["RSSI"], time.strftime("%Y%m%d%H%M%S"))
except Exception as e:
print(e)
return
# Zwracanie id bramki w z której są dane
number_gate = 0
for g in self.gates:
if g.name == self.recive_data.gate_name:
break
else:
number_gate += 1
if number_gate > len(self.gates):
print("Not search gate {} in list {}".format(g.name, self.gates))
return
last_sample = copy.deepcopy(self.sample_data_list[-1])
last_sample.samples[number_gate] = self.recive_data.rssi
actual_sample = SampleDataModel(self.recive_data.time, last_sample.samples)
self.sample_data_list.append(actual_sample)
def getJson(self):
return {"Room":self.room.getJsonModel(),
"Device":self.device.getJson(),
"Gates": [g.getJson() for g in self.gates],
"Samples":[s.getJson() for s in self.sample_data_list]}
def getJsonText(self):
return json.dumps(self.getJson())
def addStartSamples(self):
iter_gates = len(self.gates)
for i in range(iter_gates):
sample = SampleDataModel(time.strftime("%Y%m%d%H%M%S"), [0 for x in range(iter_gates)])
self.sample_data_list.append(sample)
#Properties
@property
def gates(self):
return self._gates
@gates.setter
def gates(self, g):
if len(g) < 3:
raise Exception('To few gates')
else:
self._gates = g
def __str__(self):
return self.getJsonText() | 0.529507 | 0.133387 |
import re,json,datetime,requests,encodings,os,weather,random
from multiprocessing import Process
from flask import Flask
from flask import request
import function,Baidu_Text_transAPI
def send_private_msg(user_id, message, group_id='', auto_escape='False'):
user_id = str(user_id)
message = str(message)
group_id = str(group_id)
if group_id == '': # 私聊
sand_message_user = "http://127.0.0.1:5700/" + "send_private_msg?" + "user_id=" + user_id + "&" + "message=" + message + "&" + "auto_ecscape=" + auto_escape
return_user = requests.post(sand_message_user)
else: # 通过群发
sand_message_user = "http://127.0.0.1:5700/" + "send_private_msg?" + "user_id=" + user_id + "&" + "group_id=" + group_id + "&" + "message=" + message + "&" + "auto_ecscape=" + auto_escape
return_user = requests.post(sand_message_user)
return return_user
# 发送群信息
def send_group_msg(group_id, message, auto_escape=''):
group_id = str(group_id)
message = str(message)
sand_message_user = "http://127.0.0.1:5700/" + "send_group_msg?" + "group_id=" + group_id + "&" + "message=" + message + "&" + "auto_ecscape=" + auto_escape
return_user = requests.post(sand_message_user)
return return_user
# 发送合并转发 ( 群 )
def send_group_forward_msg(group_id, messages):
sand_message_user = "http://127.0.0.1:5700/" + "send_group_forward_msg?" + "group_id=" + group_id + "&" + "message=" + messages
# 发送消息
# TODO 发送信息还在写
def send_msg(message,user_id='', group_id='', message_type='', auto_escape=''):
#sand_message_user =
pass
# 撤回消息
def delete_msg(message_id):
sand_message_user = "http://127.0.0.1:5700/" + "delete_msg?" + "message_id=" + message_id
return_user = requests.post(sand_message_user)
return return_user
# 获取消息
def get_msg(message_id):
sand_message_user = "http://127.0.0.1:5700/" + "set_group_kick?" + "message_id=" + message_id
return_user = requests.post(sand_message_user)
return return_user
# 这里是json信息,需要解析
"""
响应数据
字段 类型 说明
message_id int32 消息id
real_id int32 消息真实id
sender object 发送者
time int32 发送时间
message message 消息内容
raw_message message 原始消息内容
"""
# 获取合并转发内容
def get_forward_msg(message_id):
sand_message_user = "http://127.0.0.1:5700/" + "get_forward_msg?" + "message_id=" + message_id
return_user = requests.post(sand_message_user)
return return_user
# 此处同上
# 获取图片信息
def get_image(file):
sand_message_user = "http://127.0.0.1:5700/" + "get_image?" + "file=" + file
return_user = requests.post(sand_message_user)
return return_user
# 群组踢人
def set_group_kick(group_id, user_id, reject_add_request='false'):
sand_message_user = "http://127.0.0.1:5700/" + "set_group_kick?" + "group_if=" + group_id + "&" + "user_id=" + user_id + "&" + "reject_add_request=" + reject_add_request
return_user = requests.post(sand_message_user)
return return_user
# 群组单人禁言
def set_group_ban(group_id, user_id, duration='30 * 60'): # 最后一个参数为禁言时间,单位s,0秒表示取消禁言
sand_message_user = "http://127.0.0.1:5700/" + "set_group_ban?" + "group_id=" + group_id + "&" + "user_id=" + user_id + "&" + "duration=" + duration
return_user = requests.post(sand_message_user)
# 无响应数据
#晚间10:00报时加天气预报
def weather_sand(user_id, message, group_id='', auto_escape='False'):
message = weather.return_main()
user_id = str(user_id)
message = str(message)
group_id = str(group_id)
if group_id == '': # 私聊
sand_message_user = "http://127.0.0.1:5700/" + "send_private_msg?" + "user_id=" + user_id + "&" + "message=" + message + "&" + "auto_ecscape=" + auto_escape
return_user = requests.post(sand_message_user)
else: # 通过群发
sand_message_user = "http://127.0.0.1:5700/" + "send_private_msg?" + "user_id=" + user_id + "&" + "group_id=" + group_id + "&" + "message=" + message + "&" + "auto_ecscape=" + auto_escape
return_user = requests.post(sand_message_user)
return return_user
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def hello_world():
global time_i
main_into = request.json
post_type = main_into["post_type"]
if post_type == "message":
function.log_in(str(main_into))
message_input = main_into["message"]
message_type = main_into["message_type"]
check = str(filter(main_into, message_type))
with open('json__/main.json', 'r', encoding='utf-8') as main_json:
main_json_0 = json.load(main_json)
for key_0 in main_json_0:
value_0 = main_json_0[key_0]
if "g" in key_0:
mode_0 = value_0["mode"]
type_0 = mode_0["type"]
get_msg_0 = value_0["get_msg"]
from_0 = mode_0["from"]
if from_0 == "all":
if type_0 == "include":
if get_msg_0 in message_input:
event_0 = value_0["event"]
if event_0 == "g-s":
sand_msg_0 = value_0["return_msg"]
if message_type == "group":
group_id = main_into["group_id"]
return_0 = send_group_msg(group_id, sand_msg_0)
elif message_type == "private":
user_id = main_into["user_id"]
return_0 = send_private_msg(user_id, sand_msg_0)
else:
pass
else:
pass
elif type_0 == "exact":
if get_msg_0 == message_input:
event_0 = value_0["event"]
if event_0 == "g-s":
sand_msg_0 = value_0["return_msg"]
if message_type == "group":
group_id = main_into["group_id"]
return_0 = send_group_msg(group_id, sand_msg_0)
elif message_type == "private":
user_id = main_into["user_id"]
return_0 = send_private_msg(user_id, sand_msg_0)
else:
pass
else:
pass
else: # TODO 这里要加其他的mode
pass
elif from_0 == "private":
pass
elif from_0 == "group":
pass
else:
print("WRROW")
else:
pass
if message_input == "/help":
if message_type == "group":
group_id = main_into["group_id"]
message_out = "欢迎使用机器人小鸽(目前处于测试开发阶段)\n现有功能:\n1.AI(差不多可以用)\n2.签到(也在开发)\n3.天气预报(输入/天气即可获取)\n4.翻译(格式:/翻译:balabala(目前仅支持中翻英)\n#更多功能测试中,还未上线,有建议可以在空间下留言(格式:/留言:balabala)"
return_0 = send_group_msg(group_id, message_out)
else:
pass
elif message_input == "签到123456":
now_time = datetime.datetime.now().strftime('%R')
now_time_hour = datetime.datetime.now().strftime('%H')
now_time_hour_print = int(now_time_hour[-1])
ran_nub = str(random.randint(1, 9))
if 0 <= now_time_hour_print < 6:
time_i = "time1"
elif 6 <= now_time_hour_print < 12:
time_i = "time2"
elif 12 <= now_time_hour_print < 18:
time_i = "time3"
elif 18 <= now_time_hour_print <= 24:
time_i = "time4"
else:
print("ERROW:79")
time_0 = time_i
with open('json/yuju.json', 'r') as yuju:
yuju_input = json.load(yuju)
input_msg1 = yuju_input["qiandao"]
input_msg2 = input_msg1[time_0]
input_msg3 = input_msg2[ran_nub]
if message_type == "group":
group_id = main_into["group_id"]
message_out = input_msg3
return_0 = send_group_msg(group_id, message_out)
else:
pass
elif message_input == "/天气":
message_out = weather.main()
if message_type == "group":
group_id = main_into["group_id"]
return_0 = send_group_msg(group_id, message_out)
else:
user_id = main_into["user_id"]
return_0 = send_private_msg(user_id, message_out)
elif "/留言:" in message_input:
with open('liuyan.txt', 'a') as liuyan:
if message_type == "private":
user_id = main_into["user_id"]
write_into = "from:" + str(user_id) + "\n" + "message:" + str(message_input)
write_into.replace('/留言:', '')
liuyan.write(write_into)
message_out = "留言添加成功"
return_0 = send_private_msg(user_id, message_out)
else:
pass
elif "/fy/" in message_input:
query_0 = message_input.replace('/fy/', '')
return_ans = Baidu_Text_transAPI.transport(query_0)
return_ans2 = json.loads(return_ans)
ans_msg = return_ans2["trans_result"]
ans_msg_src = ans_msg[0]["src"]
ans_msg_dst = ans_msg[0]["dst"]
if message_type == "private":
user_id = main_into["user_id"]
message_out = "翻译:" + ans_msg_src + "\n" + "结果:" + ans_msg_dst
return_0 = send_private_msg(user_id, message_out)
else:
group_id = main_into["group_id"]
user_id = main_into["user_id"]
message_out = "[CQ:at,qq=" + str(
user_id) + "]" + "\n" + "翻译:" + ans_msg_src + "\n" + "结果:" + ans_msg_dst
return_0 = send_group_msg(group_id, message_out)
elif message_input == "/get_p_0":
message_out_1 = function.get_p_p_2()
if message_out_1 == "ERROR":
print("ERROW")
message_out_0 = "错误"
else:
message_out_0 = str(message_out_1)
message_out = "[CQ:image,file=" + message_out_0 + ",type=show,id=40000]"
print(message_out)
if message_type == "group":
group_id = main_into["group_id"]
return_0 = send_group_msg(group_id, message_out)
elif message_type == "private":
user_id = main_into["user_id"]
return_0 = send_private_msg(user_id, message_out)
elif message_input == "":
pass
elif message_input == "":
pass
elif message_input == "":
pass
elif message_input == "":
pass
elif message_input == "":
pass
elif message_input == "":
pass
elif message_input == "":
pass
elif "小鸽" in message_input:
if message_type == "group":
group_id = main_into["group_id"]
message_input1 = message_input.replace("小鸽", "")
message_out = function.ai_msg2(message_input1)
if str(message_out) == "None":
message_out = function.ai_msg(message_input)
else:
pass
return_0 = send_group_msg(group_id, message_out)
else:
pass
else:
if message_type == "private":
user_id = function.main_into["user_id"]
try:
message_input1 = message_input.replace("小鸽", "")
except:
pass
message_out = function.ai_msg2(message_input)
if str(message_out) == "None":
message_out = function.ai_msg(message_input)
else:
pass
return_0 = send_private_msg(user_id, message_out)
else:
pass
else:
pass
return ''
if __name__ == '__main__':
app.run(host='127.0.0.1',port=5701,debug=False) | try.py | import re,json,datetime,requests,encodings,os,weather,random
from multiprocessing import Process
from flask import Flask
from flask import request
import function,Baidu_Text_transAPI
def send_private_msg(user_id, message, group_id='', auto_escape='False'):
user_id = str(user_id)
message = str(message)
group_id = str(group_id)
if group_id == '': # 私聊
sand_message_user = "http://127.0.0.1:5700/" + "send_private_msg?" + "user_id=" + user_id + "&" + "message=" + message + "&" + "auto_ecscape=" + auto_escape
return_user = requests.post(sand_message_user)
else: # 通过群发
sand_message_user = "http://127.0.0.1:5700/" + "send_private_msg?" + "user_id=" + user_id + "&" + "group_id=" + group_id + "&" + "message=" + message + "&" + "auto_ecscape=" + auto_escape
return_user = requests.post(sand_message_user)
return return_user
# 发送群信息
def send_group_msg(group_id, message, auto_escape=''):
group_id = str(group_id)
message = str(message)
sand_message_user = "http://127.0.0.1:5700/" + "send_group_msg?" + "group_id=" + group_id + "&" + "message=" + message + "&" + "auto_ecscape=" + auto_escape
return_user = requests.post(sand_message_user)
return return_user
# 发送合并转发 ( 群 )
def send_group_forward_msg(group_id, messages):
sand_message_user = "http://127.0.0.1:5700/" + "send_group_forward_msg?" + "group_id=" + group_id + "&" + "message=" + messages
# 发送消息
# TODO 发送信息还在写
def send_msg(message,user_id='', group_id='', message_type='', auto_escape=''):
#sand_message_user =
pass
# 撤回消息
def delete_msg(message_id):
sand_message_user = "http://127.0.0.1:5700/" + "delete_msg?" + "message_id=" + message_id
return_user = requests.post(sand_message_user)
return return_user
# 获取消息
def get_msg(message_id):
sand_message_user = "http://127.0.0.1:5700/" + "set_group_kick?" + "message_id=" + message_id
return_user = requests.post(sand_message_user)
return return_user
# 这里是json信息,需要解析
"""
响应数据
字段 类型 说明
message_id int32 消息id
real_id int32 消息真实id
sender object 发送者
time int32 发送时间
message message 消息内容
raw_message message 原始消息内容
"""
# 获取合并转发内容
def get_forward_msg(message_id):
sand_message_user = "http://127.0.0.1:5700/" + "get_forward_msg?" + "message_id=" + message_id
return_user = requests.post(sand_message_user)
return return_user
# 此处同上
# 获取图片信息
def get_image(file):
sand_message_user = "http://127.0.0.1:5700/" + "get_image?" + "file=" + file
return_user = requests.post(sand_message_user)
return return_user
# 群组踢人
def set_group_kick(group_id, user_id, reject_add_request='false'):
sand_message_user = "http://127.0.0.1:5700/" + "set_group_kick?" + "group_if=" + group_id + "&" + "user_id=" + user_id + "&" + "reject_add_request=" + reject_add_request
return_user = requests.post(sand_message_user)
return return_user
# 群组单人禁言
def set_group_ban(group_id, user_id, duration='30 * 60'): # 最后一个参数为禁言时间,单位s,0秒表示取消禁言
sand_message_user = "http://127.0.0.1:5700/" + "set_group_ban?" + "group_id=" + group_id + "&" + "user_id=" + user_id + "&" + "duration=" + duration
return_user = requests.post(sand_message_user)
# 无响应数据
#晚间10:00报时加天气预报
def weather_sand(user_id, message, group_id='', auto_escape='False'):
message = weather.return_main()
user_id = str(user_id)
message = str(message)
group_id = str(group_id)
if group_id == '': # 私聊
sand_message_user = "http://127.0.0.1:5700/" + "send_private_msg?" + "user_id=" + user_id + "&" + "message=" + message + "&" + "auto_ecscape=" + auto_escape
return_user = requests.post(sand_message_user)
else: # 通过群发
sand_message_user = "http://127.0.0.1:5700/" + "send_private_msg?" + "user_id=" + user_id + "&" + "group_id=" + group_id + "&" + "message=" + message + "&" + "auto_ecscape=" + auto_escape
return_user = requests.post(sand_message_user)
return return_user
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def hello_world():
global time_i
main_into = request.json
post_type = main_into["post_type"]
if post_type == "message":
function.log_in(str(main_into))
message_input = main_into["message"]
message_type = main_into["message_type"]
check = str(filter(main_into, message_type))
with open('json__/main.json', 'r', encoding='utf-8') as main_json:
main_json_0 = json.load(main_json)
for key_0 in main_json_0:
value_0 = main_json_0[key_0]
if "g" in key_0:
mode_0 = value_0["mode"]
type_0 = mode_0["type"]
get_msg_0 = value_0["get_msg"]
from_0 = mode_0["from"]
if from_0 == "all":
if type_0 == "include":
if get_msg_0 in message_input:
event_0 = value_0["event"]
if event_0 == "g-s":
sand_msg_0 = value_0["return_msg"]
if message_type == "group":
group_id = main_into["group_id"]
return_0 = send_group_msg(group_id, sand_msg_0)
elif message_type == "private":
user_id = main_into["user_id"]
return_0 = send_private_msg(user_id, sand_msg_0)
else:
pass
else:
pass
elif type_0 == "exact":
if get_msg_0 == message_input:
event_0 = value_0["event"]
if event_0 == "g-s":
sand_msg_0 = value_0["return_msg"]
if message_type == "group":
group_id = main_into["group_id"]
return_0 = send_group_msg(group_id, sand_msg_0)
elif message_type == "private":
user_id = main_into["user_id"]
return_0 = send_private_msg(user_id, sand_msg_0)
else:
pass
else:
pass
else: # TODO 这里要加其他的mode
pass
elif from_0 == "private":
pass
elif from_0 == "group":
pass
else:
print("WRROW")
else:
pass
if message_input == "/help":
if message_type == "group":
group_id = main_into["group_id"]
message_out = "欢迎使用机器人小鸽(目前处于测试开发阶段)\n现有功能:\n1.AI(差不多可以用)\n2.签到(也在开发)\n3.天气预报(输入/天气即可获取)\n4.翻译(格式:/翻译:balabala(目前仅支持中翻英)\n#更多功能测试中,还未上线,有建议可以在空间下留言(格式:/留言:balabala)"
return_0 = send_group_msg(group_id, message_out)
else:
pass
elif message_input == "签到123456":
now_time = datetime.datetime.now().strftime('%R')
now_time_hour = datetime.datetime.now().strftime('%H')
now_time_hour_print = int(now_time_hour[-1])
ran_nub = str(random.randint(1, 9))
if 0 <= now_time_hour_print < 6:
time_i = "time1"
elif 6 <= now_time_hour_print < 12:
time_i = "time2"
elif 12 <= now_time_hour_print < 18:
time_i = "time3"
elif 18 <= now_time_hour_print <= 24:
time_i = "time4"
else:
print("ERROW:79")
time_0 = time_i
with open('json/yuju.json', 'r') as yuju:
yuju_input = json.load(yuju)
input_msg1 = yuju_input["qiandao"]
input_msg2 = input_msg1[time_0]
input_msg3 = input_msg2[ran_nub]
if message_type == "group":
group_id = main_into["group_id"]
message_out = input_msg3
return_0 = send_group_msg(group_id, message_out)
else:
pass
elif message_input == "/天气":
message_out = weather.main()
if message_type == "group":
group_id = main_into["group_id"]
return_0 = send_group_msg(group_id, message_out)
else:
user_id = main_into["user_id"]
return_0 = send_private_msg(user_id, message_out)
elif "/留言:" in message_input:
with open('liuyan.txt', 'a') as liuyan:
if message_type == "private":
user_id = main_into["user_id"]
write_into = "from:" + str(user_id) + "\n" + "message:" + str(message_input)
write_into.replace('/留言:', '')
liuyan.write(write_into)
message_out = "留言添加成功"
return_0 = send_private_msg(user_id, message_out)
else:
pass
elif "/fy/" in message_input:
query_0 = message_input.replace('/fy/', '')
return_ans = Baidu_Text_transAPI.transport(query_0)
return_ans2 = json.loads(return_ans)
ans_msg = return_ans2["trans_result"]
ans_msg_src = ans_msg[0]["src"]
ans_msg_dst = ans_msg[0]["dst"]
if message_type == "private":
user_id = main_into["user_id"]
message_out = "翻译:" + ans_msg_src + "\n" + "结果:" + ans_msg_dst
return_0 = send_private_msg(user_id, message_out)
else:
group_id = main_into["group_id"]
user_id = main_into["user_id"]
message_out = "[CQ:at,qq=" + str(
user_id) + "]" + "\n" + "翻译:" + ans_msg_src + "\n" + "结果:" + ans_msg_dst
return_0 = send_group_msg(group_id, message_out)
elif message_input == "/get_p_0":
message_out_1 = function.get_p_p_2()
if message_out_1 == "ERROR":
print("ERROW")
message_out_0 = "错误"
else:
message_out_0 = str(message_out_1)
message_out = "[CQ:image,file=" + message_out_0 + ",type=show,id=40000]"
print(message_out)
if message_type == "group":
group_id = main_into["group_id"]
return_0 = send_group_msg(group_id, message_out)
elif message_type == "private":
user_id = main_into["user_id"]
return_0 = send_private_msg(user_id, message_out)
elif message_input == "":
pass
elif message_input == "":
pass
elif message_input == "":
pass
elif message_input == "":
pass
elif message_input == "":
pass
elif message_input == "":
pass
elif message_input == "":
pass
elif "小鸽" in message_input:
if message_type == "group":
group_id = main_into["group_id"]
message_input1 = message_input.replace("小鸽", "")
message_out = function.ai_msg2(message_input1)
if str(message_out) == "None":
message_out = function.ai_msg(message_input)
else:
pass
return_0 = send_group_msg(group_id, message_out)
else:
pass
else:
if message_type == "private":
user_id = function.main_into["user_id"]
try:
message_input1 = message_input.replace("小鸽", "")
except:
pass
message_out = function.ai_msg2(message_input)
if str(message_out) == "None":
message_out = function.ai_msg(message_input)
else:
pass
return_0 = send_private_msg(user_id, message_out)
else:
pass
else:
pass
return ''
if __name__ == '__main__':
app.run(host='127.0.0.1',port=5701,debug=False) | 0.07922 | 0.047492 |
Modifiers = {
u'PR':0x0002,
u'PB':0x0001,
u'PRO':0x000,
u'AB':0x0400,
u'ST':0x0008,
u'FN':0x0010,
# don't think there are any with these values
u'OP':0x8000, # optional
u'HN':0x10000, # harness
u'GN':0x20000, # generator
u'AT':0x40000, # adt
}
Operators = {
u'OR':u'||',
u'AND':u'&&',
u'BINOR':u'|',
u'BINAND':u'&',
u'XOR':u'^',
u'EQUALS':u'==',
u'NOTEQUALS':u'!=',
u'LESS':u'<',
u'GREATER':u'>',
u'LESSEQUALS':u'<=',
u'GREATEREQUALS':u'>=',
u'LSHIFT':u'<<',
u'RSIGNEDSHIFT':u'>>',
u'RUNSIGNEDSHIFT':u'>>>',
u'PLUS':u'+',
u'MINUS':u'-',
u'TIMES':u'*',
u'DIVIDE':u'/',
u'REMAINDER':u'%',
}
AssignOperators = {
u'ASSIGN':u'=',
u'STAR':u'*=',
u'SLASH':u'/=',
u'REM':u'%=',
u'PLUS':u'+=',
u'MINUS':u'-=',
u'LSHIFT':u'<<=',
u'RSIGNEDSHIFT':u'>>=',
u'RUNSIGNEDSHIFT':u'>>>=',
u'AND':u'&=',
u'XOR':u'^=',
u'OR':u'|=',
# expand for Sketch
u'_STAR':u'*',
u'_SLASH':u'/',
u'_REM':u'%',
u'_PLUS':u'+',
u'_MINUS':u'-',
u'_LSHIFT':u'<<',
u'_RSIGNEDSHIFT':u'>>',
u'_RUNSIGNEDSHIFT':u'>>>',
u'_AND':u'&',
u'_XOR':u'^',
u'_OR':u'|',
}
field = {
u"@t": u"FieldDeclaration",
u"type": {
u"@t": u"ReferenceType",
u"type": {
u"@t": u"ClassOrInterfaceType",
u"name": u"A",
},
},
u"variables": {
u"@e": [
{
u"@t": u"VariableDeclarator",
u"id": {
u"name": u"a",
},
}
]
},
}
JAVA_LANG = [
# interfaces
u'java.lang.Appendable',
u'java.lang.AutoCloseable',
u'java.lang.CharSequence',
u'java.lang.Cloneable',
u'java.lang.Comparable',
u'java.lang.Iterable',
u'java.lang.Readable',
u'java.lang.Runnable',
# u'java.lang.Thread$UncaughtExceptionHandler',
# classes
u'java.lang.Boolean',
u'java.lang.Byte',
u'java.lang.Character',
# u'java.lang.Character$Subset',
# u'java.lang.Character$UnicodeBlock'
u'java.lang.Class',
u'java.lang.ClassLoader',
u'java.lang.ClassValue',
u'java.lang.Compiler',
u'java.lang.Double',
u'java.lang.Enum',
u'java.lang.Float',
u'java.lang.InheritableThreadLocal',
u'java.lang.Integer',
u'java.lang.Long',
u'java.lang.Math',
u'java.lang.Number',
# u'java.lang.Object',
u'java.lang.Package',
u'java.lang.Process',
u'java.lang.ProcessBuilder',
# u'java.lang.ProcessBuilder$Redirect',
u'java.lang.Runtime',
u'java.lang.RuntimePermission',
u'java.lang.SecurityManager',
u'java.lang.Short',
u'java.lang.StackTraceElement',
u'java.lang.StrictMath',
u'java.lang.String',
u'java.lang.StringBuffer',
u'java.lang.StringBuilder',
u'java.lang.System',
u'java.lang.Thread',
u'java.lang.ThreadGroup',
u'java.lang.ThreadLocal',
u'java.lang.Throwable',
u'java.lang.Void',
]
DESCRIPTOR_TYPES = {
u'B': u'byte', # signed byte
u'C': u'char', # Unicode character code point in the Basic Multilingual Plane, encoded with UTF-16
u'D': u'double', # double-precision floating-point value
u'F': u'float', # single-precision floating-point value
u'I': u'int', # integer
u'J': u'long', # long integer
u'L': u'ClassName', # ;referencean instance of class ClassName
u'S': u'short', # signed short
u'V': u'void', # void
u'Z': u'boolean', # true or false
u'[': u'reference', # one array dimension
}
PRIMITIVES = [u'void', u'bit', u'boolean', u'this', u'char', u'byte', u'short', u'int', u'long', u'float', u'double',]
def _import():
from .importdeclaration import ImportDeclaration
from .body.classorinterfacedeclaration import ClassOrInterfaceDeclaration
from .body.fielddeclaration import FieldDeclaration
from .body.methoddeclaration import MethodDeclaration
from .body.axiomdeclaration import AxiomDeclaration
from .expr.nameexpr import NameExpr
from .expr.qualifiednameexpr import QualifiedNameExpr
from .type.referencetype import ReferenceType
from .comments.javadoccomment import JavadocComment
from .comments.linecomment import LineComment
from .comments.blockcomment import BlockComment
return locals() | jskparser/ast/__init__.py | Modifiers = {
u'PR':0x0002,
u'PB':0x0001,
u'PRO':0x000,
u'AB':0x0400,
u'ST':0x0008,
u'FN':0x0010,
# don't think there are any with these values
u'OP':0x8000, # optional
u'HN':0x10000, # harness
u'GN':0x20000, # generator
u'AT':0x40000, # adt
}
Operators = {
u'OR':u'||',
u'AND':u'&&',
u'BINOR':u'|',
u'BINAND':u'&',
u'XOR':u'^',
u'EQUALS':u'==',
u'NOTEQUALS':u'!=',
u'LESS':u'<',
u'GREATER':u'>',
u'LESSEQUALS':u'<=',
u'GREATEREQUALS':u'>=',
u'LSHIFT':u'<<',
u'RSIGNEDSHIFT':u'>>',
u'RUNSIGNEDSHIFT':u'>>>',
u'PLUS':u'+',
u'MINUS':u'-',
u'TIMES':u'*',
u'DIVIDE':u'/',
u'REMAINDER':u'%',
}
AssignOperators = {
u'ASSIGN':u'=',
u'STAR':u'*=',
u'SLASH':u'/=',
u'REM':u'%=',
u'PLUS':u'+=',
u'MINUS':u'-=',
u'LSHIFT':u'<<=',
u'RSIGNEDSHIFT':u'>>=',
u'RUNSIGNEDSHIFT':u'>>>=',
u'AND':u'&=',
u'XOR':u'^=',
u'OR':u'|=',
# expand for Sketch
u'_STAR':u'*',
u'_SLASH':u'/',
u'_REM':u'%',
u'_PLUS':u'+',
u'_MINUS':u'-',
u'_LSHIFT':u'<<',
u'_RSIGNEDSHIFT':u'>>',
u'_RUNSIGNEDSHIFT':u'>>>',
u'_AND':u'&',
u'_XOR':u'^',
u'_OR':u'|',
}
field = {
u"@t": u"FieldDeclaration",
u"type": {
u"@t": u"ReferenceType",
u"type": {
u"@t": u"ClassOrInterfaceType",
u"name": u"A",
},
},
u"variables": {
u"@e": [
{
u"@t": u"VariableDeclarator",
u"id": {
u"name": u"a",
},
}
]
},
}
JAVA_LANG = [
# interfaces
u'java.lang.Appendable',
u'java.lang.AutoCloseable',
u'java.lang.CharSequence',
u'java.lang.Cloneable',
u'java.lang.Comparable',
u'java.lang.Iterable',
u'java.lang.Readable',
u'java.lang.Runnable',
# u'java.lang.Thread$UncaughtExceptionHandler',
# classes
u'java.lang.Boolean',
u'java.lang.Byte',
u'java.lang.Character',
# u'java.lang.Character$Subset',
# u'java.lang.Character$UnicodeBlock'
u'java.lang.Class',
u'java.lang.ClassLoader',
u'java.lang.ClassValue',
u'java.lang.Compiler',
u'java.lang.Double',
u'java.lang.Enum',
u'java.lang.Float',
u'java.lang.InheritableThreadLocal',
u'java.lang.Integer',
u'java.lang.Long',
u'java.lang.Math',
u'java.lang.Number',
# u'java.lang.Object',
u'java.lang.Package',
u'java.lang.Process',
u'java.lang.ProcessBuilder',
# u'java.lang.ProcessBuilder$Redirect',
u'java.lang.Runtime',
u'java.lang.RuntimePermission',
u'java.lang.SecurityManager',
u'java.lang.Short',
u'java.lang.StackTraceElement',
u'java.lang.StrictMath',
u'java.lang.String',
u'java.lang.StringBuffer',
u'java.lang.StringBuilder',
u'java.lang.System',
u'java.lang.Thread',
u'java.lang.ThreadGroup',
u'java.lang.ThreadLocal',
u'java.lang.Throwable',
u'java.lang.Void',
]
DESCRIPTOR_TYPES = {
u'B': u'byte', # signed byte
u'C': u'char', # Unicode character code point in the Basic Multilingual Plane, encoded with UTF-16
u'D': u'double', # double-precision floating-point value
u'F': u'float', # single-precision floating-point value
u'I': u'int', # integer
u'J': u'long', # long integer
u'L': u'ClassName', # ;referencean instance of class ClassName
u'S': u'short', # signed short
u'V': u'void', # void
u'Z': u'boolean', # true or false
u'[': u'reference', # one array dimension
}
PRIMITIVES = [u'void', u'bit', u'boolean', u'this', u'char', u'byte', u'short', u'int', u'long', u'float', u'double',]
def _import():
from .importdeclaration import ImportDeclaration
from .body.classorinterfacedeclaration import ClassOrInterfaceDeclaration
from .body.fielddeclaration import FieldDeclaration
from .body.methoddeclaration import MethodDeclaration
from .body.axiomdeclaration import AxiomDeclaration
from .expr.nameexpr import NameExpr
from .expr.qualifiednameexpr import QualifiedNameExpr
from .type.referencetype import ReferenceType
from .comments.javadoccomment import JavadocComment
from .comments.linecomment import LineComment
from .comments.blockcomment import BlockComment
return locals() | 0.401219 | 0.146789 |
import click
import torchvision.models.resnet as resnet
from artlearn.common_utils import (
LOG_DIR, MODEL_DIR,
get_dataloaders, ArtistLearner
)
@click.command()
@click.option('--mode', type=str, default='sgd',
help='Which optimizer you wish to use, currently supports '
'SGD and ADAM. Default is SGD.')
@click.option('-e', '--epochs', type=int, default=80,
help='Number of epochs with which to train. Default is 80.')
@click.option('-l', '--lr', type=float, default=1e-3,
help='The learning rate to use for the optimizer. '
'Default is 1e-3.')
@click.option('-m', '--momentum', type=float, default=0.9,
help='If using SGD, the momentum to use. Default is 0.9.')
@click.option('-a', '--log-after', type=int, default=80,
help='Number of iterations within an epoch to log out stats '
'after. Default is 80.')
@click.option('--log-path', envvar='ART_LOG_PATH', type=str, default=LOG_DIR,
help='Absolute path to write logs out to.')
@click.option('--model-path', envvar='ART_MODEL_PATH', type=str,
default=MODEL_DIR,
help='Absolute path to write model files out to.')
@click.option('-n', '--name', type=str,
help='Name override for the model and log files. Otherwise, '
'named after its parameters in the form: '
'{mode}_e_{epochs}_lr_{lr}_m_{momentum}')
@click.option('-p', '--pretrained', is_flag=True)
def train(mode, epochs, lr, momentum, log_after, log_path, model_path, name,
pretrained):
train, test, val = get_dataloaders()
network = resnet.resnet18(pretrained=pretrained)
learner = ArtistLearner(network, mode, epochs, train, test, val, lr=lr,
momentum=momentum, log_after=log_after,
log_path=log_path, model_path=model_path,
model_name=name)
learner.train_and_validate()
def main():
train()
if __name__ == '__main__':
main() | artlearn/cli/command.py | import click
import torchvision.models.resnet as resnet
from artlearn.common_utils import (
LOG_DIR, MODEL_DIR,
get_dataloaders, ArtistLearner
)
@click.command()
@click.option('--mode', type=str, default='sgd',
help='Which optimizer you wish to use, currently supports '
'SGD and ADAM. Default is SGD.')
@click.option('-e', '--epochs', type=int, default=80,
help='Number of epochs with which to train. Default is 80.')
@click.option('-l', '--lr', type=float, default=1e-3,
help='The learning rate to use for the optimizer. '
'Default is 1e-3.')
@click.option('-m', '--momentum', type=float, default=0.9,
help='If using SGD, the momentum to use. Default is 0.9.')
@click.option('-a', '--log-after', type=int, default=80,
help='Number of iterations within an epoch to log out stats '
'after. Default is 80.')
@click.option('--log-path', envvar='ART_LOG_PATH', type=str, default=LOG_DIR,
help='Absolute path to write logs out to.')
@click.option('--model-path', envvar='ART_MODEL_PATH', type=str,
default=MODEL_DIR,
help='Absolute path to write model files out to.')
@click.option('-n', '--name', type=str,
help='Name override for the model and log files. Otherwise, '
'named after its parameters in the form: '
'{mode}_e_{epochs}_lr_{lr}_m_{momentum}')
@click.option('-p', '--pretrained', is_flag=True)
def train(mode, epochs, lr, momentum, log_after, log_path, model_path, name,
pretrained):
train, test, val = get_dataloaders()
network = resnet.resnet18(pretrained=pretrained)
learner = ArtistLearner(network, mode, epochs, train, test, val, lr=lr,
momentum=momentum, log_after=log_after,
log_path=log_path, model_path=model_path,
model_name=name)
learner.train_and_validate()
def main():
train()
if __name__ == '__main__':
main() | 0.751648 | 0.109777 |
import logging
import unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.port import Port
from webkitpy.port.server_process_mock import MockServerProcess
from webkitpy.port.xvfbdriver import XvfbDriver
from webkitpy.tool.mocktool import MockOptions
_log = logging.getLogger(__name__)
class XvfbDriverTest(unittest.TestCase):
def make_driver(self, worker_number=0, xorg_running=False, executive=None):
port = Port(MockSystemHost(log_executive=True, executive=executive), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
port._config.build_directory = lambda configuration: "/mock-build"
port._test_runner_process_constructor = MockServerProcess
if xorg_running:
port._executive._running_pids['Xorg'] = 108
driver = XvfbDriver(port, worker_number=worker_number, pixel_tests=True)
driver._startup_delay_secs = 0
driver._xvfb_screen_depth = lambda: '24'
driver._xvfb_pipe = lambda: (3, 4)
driver._xvfb_read_display_id = lambda x: 1
driver._xvfb_close_pipe = lambda p: None
driver._port_server_environment = port.setup_environ_for_server(port.driver_name())
return driver
def cleanup_driver(self, driver):
# Setting _xvfb_process member to None is necessary as the Driver object is stopped on deletion,
# killing the Xvfb process if present. Thus, this method should only be called from tests that do not
# intend to test the behavior of XvfbDriver.stop.
driver._xvfb_process = None
def assertDriverStartSuccessful(self, driver, expected_logs, expected_display, pixel_tests=False):
OutputCapture().assert_outputs(self, driver.start, [pixel_tests, []], expected_logs=expected_logs)
self.assertTrue(driver._server_process.started)
self.assertEqual(driver._server_process.env['DISPLAY'], expected_display)
self.assertEqual(driver._server_process.env['GDK_BACKEND'], 'x11')
def test_start(self):
driver = self.make_driver()
expected_logs = ("MOCK popen: ['Xvfb', '-displayfd', '4', '-screen', '0', '1024x768x24', '-nolisten', 'tcp'], env=%s\n" % driver._port_server_environment)
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":1")
self.cleanup_driver(driver)
def test_start_arbitrary_worker_number(self):
driver = self.make_driver(worker_number=17)
expected_logs = ("MOCK popen: ['Xvfb', '-displayfd', '4', '-screen', '0', '1024x768x24', '-nolisten', 'tcp'], env=%s\n" % driver._port_server_environment)
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":1", pixel_tests=True)
self.cleanup_driver(driver)
def test_stop(self):
port = Port(MockSystemHost(log_executive=True), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
port._executive.kill_process = lambda x: _log.info("MOCK kill_process pid: " + str(x))
driver = XvfbDriver(port, worker_number=0, pixel_tests=True)
class FakeXvfbProcess(object):
pid = 1234
driver._xvfb_process = FakeXvfbProcess()
expected_logs = "MOCK kill_process pid: 1234\n"
OutputCapture().assert_outputs(self, driver.stop, [], expected_logs=expected_logs)
self.assertIsNone(driver._xvfb_process) | Tools/Scripts/webkitpy/port/xvfbdriver_unittest.py |
import logging
import unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.port import Port
from webkitpy.port.server_process_mock import MockServerProcess
from webkitpy.port.xvfbdriver import XvfbDriver
from webkitpy.tool.mocktool import MockOptions
_log = logging.getLogger(__name__)
class XvfbDriverTest(unittest.TestCase):
def make_driver(self, worker_number=0, xorg_running=False, executive=None):
port = Port(MockSystemHost(log_executive=True, executive=executive), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
port._config.build_directory = lambda configuration: "/mock-build"
port._test_runner_process_constructor = MockServerProcess
if xorg_running:
port._executive._running_pids['Xorg'] = 108
driver = XvfbDriver(port, worker_number=worker_number, pixel_tests=True)
driver._startup_delay_secs = 0
driver._xvfb_screen_depth = lambda: '24'
driver._xvfb_pipe = lambda: (3, 4)
driver._xvfb_read_display_id = lambda x: 1
driver._xvfb_close_pipe = lambda p: None
driver._port_server_environment = port.setup_environ_for_server(port.driver_name())
return driver
def cleanup_driver(self, driver):
# Setting _xvfb_process member to None is necessary as the Driver object is stopped on deletion,
# killing the Xvfb process if present. Thus, this method should only be called from tests that do not
# intend to test the behavior of XvfbDriver.stop.
driver._xvfb_process = None
def assertDriverStartSuccessful(self, driver, expected_logs, expected_display, pixel_tests=False):
OutputCapture().assert_outputs(self, driver.start, [pixel_tests, []], expected_logs=expected_logs)
self.assertTrue(driver._server_process.started)
self.assertEqual(driver._server_process.env['DISPLAY'], expected_display)
self.assertEqual(driver._server_process.env['GDK_BACKEND'], 'x11')
def test_start(self):
driver = self.make_driver()
expected_logs = ("MOCK popen: ['Xvfb', '-displayfd', '4', '-screen', '0', '1024x768x24', '-nolisten', 'tcp'], env=%s\n" % driver._port_server_environment)
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":1")
self.cleanup_driver(driver)
def test_start_arbitrary_worker_number(self):
driver = self.make_driver(worker_number=17)
expected_logs = ("MOCK popen: ['Xvfb', '-displayfd', '4', '-screen', '0', '1024x768x24', '-nolisten', 'tcp'], env=%s\n" % driver._port_server_environment)
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":1", pixel_tests=True)
self.cleanup_driver(driver)
def test_stop(self):
port = Port(MockSystemHost(log_executive=True), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
port._executive.kill_process = lambda x: _log.info("MOCK kill_process pid: " + str(x))
driver = XvfbDriver(port, worker_number=0, pixel_tests=True)
class FakeXvfbProcess(object):
pid = 1234
driver._xvfb_process = FakeXvfbProcess()
expected_logs = "MOCK kill_process pid: 1234\n"
OutputCapture().assert_outputs(self, driver.stop, [], expected_logs=expected_logs)
self.assertIsNone(driver._xvfb_process) | 0.596786 | 0.260683 |
# standard library imports
from string import ascii_uppercase, digits
from typing import Tuple
# third-party imports
from iso3166 import countries
# local imports
from .identifier import Identifier
from .luhn import luhn
from .micutils import get_mic_record
_ALPHABET = digits + ascii_uppercase
class MIC(Identifier):
"""Market Identifier Code
A unique identification code used to identify securities trading
exchanges, regulated and non-regulated trading markets.
Each MIC is a four alpha character code, defined in ISO 10383.
"""
__slots__ = ()
# noinspection PyMissingConstructor
def __init__(self, mic: str) -> None:
"""
Args:
mic (str): string representation of the MIC
Returns:
:class:`MIC` instance
Raises:
TypeError: given `mic` is not an instance of str
ValueError: given `mic` not found in the registry
"""
if not isinstance(mic, str):
raise TypeError("Argument must be instance of 'str'.")
mic = mic.strip()
try:
get_mic_record(mic)
except KeyError:
raise ValueError(f"Unknown MIC: '{mic}'.")
self._id = mic
def __str__(self) -> str:
"""str(self)"""
return self._id
class ISIN(Identifier):
"""International Securities Identification Number
An International Securities Identification Number uniquely identifies a
tradable financial asset, a.k.a security.
As defined in ISO 6166, each ISIN consists of a two-letter ISO 3166-1
Country Code for the issuing country, followed by nine alpha-numeric
characters (the National Securities Identifying Number, or NSIN, which
identifies the security), and one numerical check digit, calculated by the
Luhn algorithm.
"""
__slots__ = ()
@staticmethod
def calc_check_digit(country_code: str, nsin: str) -> str:
"""Calculate ISIN check digit."""
return str(luhn(country_code + nsin))
@property
def country_code(self) -> str:
"""Return the ISIN's Country Code."""
return self._id[:2]
@property
def check_digit(self) -> str:
"""Return the ISIN's check digits."""
return self._id[-1]
@property
def nsin(self) -> str:
"""Return the ISIN's National Securities Identifying Number."""
return self._id[2:-1]
def elements(self) -> Tuple[str, str, str]:
"""Return the ISIN's Country Code, National Securities Identifying
Number and check digit as tuple."""
return self.country_code, self.nsin, self.check_digit
# noinspection PyMissingConstructor
def __init__(self, *args: str) -> None:
"""Instances of :class:`ISIN` can be created in two ways, by providing
a Unicode string representation of an ISIN or by providing a country
code and a national securities identifying number.
**1. Form**
Args:
isin (str): string representation of an ISIN
Returns:
instance of :class:`ISIN`
Raises:
TypeError: given `isin` is not a `Unicode string`
ValueError: given `isin` contains an unknown country code
ValueError: given `isin` contains a wrong check digit
ValueError: given `isin` must be 12 characters long
ValueError: given `isin` contains invalid character(s)
**2. Form**
Args:
country_code (str): 2-character country code
according to ISO 3166
nsin (str): national securities identifying
number
Returns:
instance of :class:`ISIN`
Raises:
TypeError: invalid number of arguments
TypeError: given `country_code` is not a `Unicode string`
ValueError: given `country_code` contains an invalid or unknown
country code
TypeError: given `nsin` is not a `Unicode string`
ValueError: length of given `nsin` not valid
ValueError: given `nsin` contains invalid character(s)
"""
n_args = len(args)
if n_args == 1:
arg0 = args[0]
if not isinstance(arg0, str):
raise TypeError("Argument must be instance of 'str'.")
arg0 = arg0.strip()
if len(arg0) != 12:
raise ValueError('Invalid ISIN format: '
'given string must be 12 characters long.')
country_code = arg0[:2]
try:
countries.get(country_code)
except KeyError:
raise ValueError(f"Unknown country code: '{country_code}'.")
nsin = arg0[2:-1]
check_digit = self.__class__.calc_check_digit(country_code, nsin)
if check_digit != arg0[-1]:
raise ValueError("Wrong check digit; should be "
f"'{check_digit}'.")
self._id = arg0
elif n_args == 2:
arg0 = args[0]
if not isinstance(arg0, str):
raise TypeError("Country code must be instance of 'str'.")
if len(arg0) != 2:
raise ValueError("Country code must be a 2-character string.")
country_code = arg0
try:
countries.get(country_code)
except KeyError:
raise ValueError(f"Unknown country code: '{country_code}'.")
arg1 = args[1]
if isinstance(arg1, str):
len_nsin = len(arg1)
if len_nsin == 9:
nsin = arg1
elif 6 <= len_nsin < 9:
nsin = arg1.rjust(9, '0')
else:
raise ValueError("Given NSIN must contain between 6 and 9"
" characters.")
else:
raise TypeError("Given nsin must be instance of 'str'.")
check_digit = self.__class__.calc_check_digit(country_code, nsin)
self._id = ''.join((country_code, nsin, check_digit))
else:
raise TypeError('Invalid number of arguments.')
def __str__(self) -> str:
"""str(self)"""
return self._id | src/identifiers/finance.py | # standard library imports
from string import ascii_uppercase, digits
from typing import Tuple
# third-party imports
from iso3166 import countries
# local imports
from .identifier import Identifier
from .luhn import luhn
from .micutils import get_mic_record
_ALPHABET = digits + ascii_uppercase
class MIC(Identifier):
    """Market Identifier Code

    A unique identification code used to identify securities trading
    exchanges, regulated and non-regulated trading markets.
    Each MIC is a four alpha character code, defined in ISO 10383.
    """
    __slots__ = ()
    # noinspection PyMissingConstructor
    def __init__(self, mic: str) -> None:
        """Create a :class:`MIC` from its string representation.

        Args:
            mic (str): string representation of the MIC
        Returns:
            :class:`MIC` instance
        Raises:
            TypeError: given `mic` is not an instance of str
            ValueError: given `mic` not found in the registry
        """
        if not isinstance(mic, str):
            raise TypeError("Argument must be instance of 'str'.")
        mic = mic.strip()
        try:
            # Membership in the ISO 10383 registry is the validity test.
            get_mic_record(mic)
        except KeyError:
            raise ValueError(f"Unknown MIC: '{mic}'.")
        else:
            self._id = mic
    def __str__(self) -> str:
        """str(self)"""
        return self._id
class ISIN(Identifier):
    """International Securities Identification Number

    An ISIN (ISO 6166) uniquely identifies a tradable financial asset,
    a.k.a. security.
    It consists of a two-letter ISO 3166-1 country code for the issuing
    country, nine alpha-numeric characters (the National Securities
    Identifying Number, or NSIN, which identifies the security), and one
    numerical check digit calculated by the Luhn algorithm.
    """
    __slots__ = ()
    @staticmethod
    def calc_check_digit(country_code: str, nsin: str) -> str:
        """Calculate ISIN check digit."""
        payload = country_code + nsin
        return str(luhn(payload))
    @property
    def country_code(self) -> str:
        """Return the ISIN's Country Code."""
        return self._id[:2]
    @property
    def check_digit(self) -> str:
        """Return the ISIN's check digit."""
        return self._id[-1]
    @property
    def nsin(self) -> str:
        """Return the ISIN's National Securities Identifying Number."""
        return self._id[2:-1]
    def elements(self) -> Tuple[str, str, str]:
        """Return the ISIN's Country Code, National Securities Identifying
        Number and check digit as a tuple."""
        return (self.country_code, self.nsin, self.check_digit)
    # noinspection PyMissingConstructor
    def __init__(self, *args: str) -> None:
        """Create an :class:`ISIN` either from a 12-character string
        representation or from a country code plus an NSIN.

        **1. Form**
        Args:
            isin (str): string representation of an ISIN
        Raises:
            TypeError: given `isin` is not a `str`
            ValueError: given `isin` is not 12 characters long, contains
                an unknown country code, invalid character(s) or a wrong
                check digit
        **2. Form**
        Args:
            country_code (str): 2-character country code per ISO 3166
            nsin (str): national securities identifying number
        Raises:
            TypeError: invalid number of arguments, or an argument is not
                a `str`
            ValueError: invalid or unknown `country_code`, or `nsin` of
                invalid length or with invalid character(s)
        """
        if len(args) == 1:
            isin = args[0]
            if not isinstance(isin, str):
                raise TypeError("Argument must be instance of 'str'.")
            isin = isin.strip()
            if len(isin) != 12:
                raise ValueError('Invalid ISIN format: '
                                 'given string must be 12 characters long.')
            country_code = isin[:2]
            try:
                countries.get(country_code)
            except KeyError:
                raise ValueError(f"Unknown country code: '{country_code}'.")
            expected = self.__class__.calc_check_digit(country_code,
                                                       isin[2:-1])
            if expected != isin[-1]:
                raise ValueError("Wrong check digit; should be "
                                 f"'{expected}'.")
            self._id = isin
        elif len(args) == 2:
            country_code, raw_nsin = args
            if not isinstance(country_code, str):
                raise TypeError("Country code must be instance of 'str'.")
            if len(country_code) != 2:
                raise ValueError("Country code must be a 2-character string.")
            try:
                countries.get(country_code)
            except KeyError:
                raise ValueError(f"Unknown country code: '{country_code}'.")
            if not isinstance(raw_nsin, str):
                raise TypeError("Given nsin must be instance of 'str'.")
            if not 6 <= len(raw_nsin) <= 9:
                raise ValueError("Given NSIN must contain between 6 and 9"
                                 " characters.")
            # Short NSINs are zero-padded on the left to the full 9 chars.
            nsin = raw_nsin.rjust(9, '0')
            check_digit = self.__class__.calc_check_digit(country_code, nsin)
            self._id = ''.join((country_code, nsin, check_digit))
        else:
            raise TypeError('Invalid number of arguments.')
    def __str__(self) -> str:
        """str(self)"""
        return self._id
import numpy as np
import matplotlib.pyplot as plt
from . import transform
def plot_pulse(pulse, m=1000, ptype='ex', phase='linear',
               omega_range=(-np.pi, np.pi),
               linewidth=2, fontsize='x-large', labelsize='large'):
    """Plot an RF pulse and its frequency-domain magnetization profiles.

    Args:
        pulse (array): complex RF pulse samples (B1x + i * B1y).
        m (int): number of frequency samples to evaluate.
        ptype (str): 'se' plots the spin-echo profile (beta**2);
            anything else plots the excitation profiles.
        phase (str): if 'linear', the linear phase ramp is removed
            from M_xy before plotting.
        omega_range (sequence): [min, max] frequency range in radians.
            Fixed: was a mutable default list; a tuple avoids the shared
            mutable-default pitfall and is backward compatible.
        linewidth, fontsize, labelsize: matplotlib styling options.

    Returns:
        list: matplotlib Figure objects, one per plot.
    """
    figs = []
    n = len(pulse)
    fig, ax = plt.subplots()
    ax.plot(pulse.real, label=r'$B_{1, \mathrm{x}}$', linewidth=linewidth)
    ax.plot(pulse.imag, label=r'$B_{1, \mathrm{y}}$', linewidth=linewidth)
    ax.set_title(r'$B_1$ (Energy={0:.3g}, Peak={1:.3g})'.format(
        np.sum(np.abs(pulse)**2), np.max(np.abs(pulse))), fontsize=fontsize)
    ax.set_xlabel('Time', fontsize=fontsize)
    ax.legend(fontsize=fontsize)
    ax.yaxis.set_tick_params(labelsize=labelsize)
    ax.xaxis.set_tick_params(labelsize=labelsize)
    figs.append(fig)
    # Evaluate the SLR polynomials (alpha, beta) on the unit circle.
    omega = np.linspace(omega_range[0], omega_range[1], m)
    psi_z = np.exp(-1j * np.outer(omega, np.arange(n)))
    a, b = transform.forward_slr(pulse)
    alpha = psi_z @ a
    beta = psi_z @ b
    if ptype == 'se':
        # Spin-echo refocusing profile.
        m_xy = beta**2
        m_xy *= np.exp(1j * omega * (n - 1))
        fig, ax = plt.subplots()
        ax.set_title(r'$M_{\mathrm{xy}}$')
        ax.set_xlabel(r'$\omega$ [radian]')
        ax.plot(omega, np.real(m_xy), label=r'$M_{\mathrm{x}}$', linewidth=linewidth)
        ax.plot(omega, np.imag(m_xy), label=r'$M_{\mathrm{y}}$', linewidth=linewidth)
        ax.legend(fontsize=fontsize)
        ax.yaxis.set_tick_params(labelsize=labelsize)
        ax.xaxis.set_tick_params(labelsize=labelsize)
        figs.append(fig)
    else:
        # Excitation profiles.
        m_xy = 2 * alpha.conjugate() * beta
        m_z = np.abs(alpha)**2 - np.abs(beta)**2
        if phase == 'linear':
            m_xy *= np.exp(1j * omega * n / 2)
        fig, ax = plt.subplots()
        ax.set_title(r'$|M_{\mathrm{xy}}|$', fontsize=fontsize)
        ax.set_xlabel(r'$\omega$ [radian]', fontsize=fontsize)
        ax.plot(omega, np.abs(m_xy), linewidth=linewidth)
        ax.yaxis.set_tick_params(labelsize=labelsize)
        ax.xaxis.set_tick_params(labelsize=labelsize)
        figs.append(fig)
        fig, ax = plt.subplots()
        ax.set_title(r'$\angle M_{\mathrm{xy}}$', fontsize=fontsize)
        ax.set_xlabel(r'$\omega$ [radian]', fontsize=fontsize)
        ax.plot(omega, np.angle(m_xy), linewidth=linewidth)
        ax.yaxis.set_tick_params(labelsize=labelsize)
        ax.xaxis.set_tick_params(labelsize=labelsize)
        figs.append(fig)
        fig, ax = plt.subplots()
        ax.set_title(r'$M_{\mathrm{z}}$', fontsize=fontsize)
        ax.set_xlabel(r'$\omega$ [radian]', fontsize=fontsize)
        ax.plot(omega, m_z, linewidth=linewidth)
        ax.yaxis.set_tick_params(labelsize=labelsize)
        ax.xaxis.set_tick_params(labelsize=labelsize)
        figs.append(fig)
    return figs
def plot_slr_pulses(pulse_slr, pulse_slfrank,
                    m=1000, ptype='ex', phase='linear',
                    omega_range=(-np.pi, np.pi),
                    fontsize='x-large', labelsize='large'):
    """Compare an SLR pulse and an SLfRank pulse in a 2x2 figure.

    Args:
        pulse_slr (array): complex SLR pulse samples.
        pulse_slfrank (array): complex SLfRank pulse samples (same length).
        m (int): number of frequency samples to evaluate.
        ptype (str): 'se' plots spin-echo profiles, else excitation.
        phase (str): if 'linear', remove the linear phase ramp from M_xy.
        omega_range (sequence): [min, max] frequency range in radians.
            Fixed: was a mutable default list; a tuple avoids the shared
            mutable-default pitfall and is backward compatible.
        fontsize, labelsize: NOTE(review): accepted but currently unused
            in the body — confirm whether they should be applied.

    Returns:
        matplotlib Figure with 2x2 axes.
    """
    n = len(pulse_slr)
    fig, axs = plt.subplots(2, 2)
    axs[0][0].plot(pulse_slr.real,
                   linewidth=0.5,
                   label='SLR',
                   color='tab:orange')
    axs[0][0].plot(pulse_slfrank.real,
                   linewidth=0.5,
                   label='SLfRank',
                   color='tab:blue')
    axs[0][0].set_title(r'$B_{1}$')
    axs[0][0].set_xlabel('Time')
    axs[0][0].legend()
    # Evaluate both pulses' SLR polynomials on the unit circle.
    omega = np.linspace(omega_range[0], omega_range[1], m)
    psi_z = np.exp(-1j * np.outer(omega, np.arange(n)))
    a_slr, b_slr = transform.forward_slr(pulse_slr)
    alpha_slr = psi_z @ a_slr
    beta_slr = psi_z @ b_slr
    a_slfrank, b_slfrank = transform.forward_slr(pulse_slfrank)
    alpha_slfrank = psi_z @ a_slfrank
    beta_slfrank = psi_z @ b_slfrank
    if ptype == 'se':
        # Spin-echo profiles.
        m_xy_slr = beta_slr**2
        m_xy_slr *= np.exp(1j * omega * (n - 1))
        m_z_slr = 2 * np.imag(alpha_slr * beta_slr)
        m_xy_slfrank = beta_slfrank**2
        m_xy_slfrank *= np.exp(1j * omega * (n - 1))
        m_z_slfrank = 2 * np.imag(alpha_slfrank * beta_slfrank)
        axs[1][0].set_title(r'$M_{\mathrm{x}}$')
        axs[1][0].set_xlabel(r'$\omega$ [radian]')
        axs[1][0].plot(omega, np.real(m_xy_slr),
                       linewidth=0.5,
                       label=r'SLR',
                       color='tab:orange')
        axs[1][0].plot(omega, np.real(m_xy_slfrank),
                       linewidth=0.5,
                       label='SLfRank',
                       color='tab:blue')
        axs[1][1].set_title(r'$M_{\mathrm{y}}$')
        axs[1][1].set_xlabel(r'$\omega$ [radian]')
        axs[1][1].plot(omega, np.imag(m_xy_slr),
                       linewidth=0.5,
                       label=r'SLR',
                       color='tab:orange')
        axs[1][1].plot(omega, np.imag(m_xy_slfrank),
                       linewidth=0.5,
                       label='SLfRank',
                       color='tab:blue')
        axs[0][1].set_title(r'$M_{\mathrm{z}}$')
        axs[0][1].set_xlabel(r'$\omega$ [radian]')
        axs[0][1].plot(omega, m_z_slr,
                       linewidth=0.5,
                       label=r'SLR',
                       color='tab:orange')
        axs[0][1].plot(omega, m_z_slfrank,
                       linewidth=0.5,
                       label='SLfRank',
                       color='tab:blue')
    else:
        # Excitation profiles.
        m_xy_slr = 2 * alpha_slr.conjugate() * beta_slr
        m_z_slr = np.abs(alpha_slr)**2 - np.abs(beta_slr)**2
        m_xy_slfrank = 2 * alpha_slfrank.conjugate() * beta_slfrank
        m_z_slfrank = np.abs(alpha_slfrank)**2 - np.abs(beta_slfrank)**2
        if phase == 'linear':
            m_xy_slr *= np.exp(1j * omega * n / 2)
            m_xy_slfrank *= np.exp(1j * omega * n / 2)
        axs[1][0].set_title(r'$|M_{\mathrm{xy}}|$')
        axs[1][0].set_xlabel(r'$\omega$ [radian]')
        axs[1][0].plot(omega, np.abs(m_xy_slr),
                       linewidth=0.5,
                       label=r'SLR',
                       color='tab:orange')
        axs[1][0].plot(omega, np.abs(m_xy_slfrank),
                       linewidth=0.5,
                       label=r'SLfRank',
                       color='tab:blue')
        axs[1][1].set_title(r'$\angle M_{\mathrm{xy}}$')
        axs[1][1].set_xlabel(r'$\omega$ [radian]')
        axs[1][1].plot(omega, np.angle(m_xy_slr),
                       linewidth=0.5,
                       label=r'SLR',
                       color='tab:orange')
        axs[1][1].plot(omega, np.angle(m_xy_slfrank),
                       linewidth=0.5,
                       label=r'SLfRank',
                       color='tab:blue')
        axs[0][1].set_title(r'$M_{\mathrm{z}}$')
        axs[0][1].set_xlabel(r'$\omega$ [radian]')
        axs[0][1].plot(omega, m_z_slr,
                       linewidth=0.5,
                       label=r'SLR',
                       color='tab:orange')
        axs[0][1].plot(omega, m_z_slfrank,
                       linewidth=0.5,
                       label=r'SLfRank',
                       color='tab:blue')
    return fig
import matplotlib.pyplot as plt
from . import transform
def plot_pulse(pulse, m=1000, ptype='ex', phase='linear',
               omega_range=[-np.pi, np.pi],
               linewidth=2, fontsize='x-large', labelsize='large'):
    """Plot an RF pulse and its frequency-domain magnetization profiles.

    Returns a list of matplotlib Figure objects.
    NOTE(review): `omega_range` is a mutable default argument; it is only
    indexed here, never mutated, so it is safe — a tuple would be cleaner.
    """
    figs = []
    n = len(pulse)
    fig, ax = plt.subplots()
    ax.plot(pulse.real, label=r'$B_{1, \mathrm{x}}$', linewidth=linewidth)
    ax.plot(pulse.imag, label=r'$B_{1, \mathrm{y}}$', linewidth=linewidth)
    ax.set_title(r'$B_1$ (Energy={0:.3g}, Peak={1:.3g})'.format(
        np.sum(np.abs(pulse)**2), np.max(np.abs(pulse))), fontsize=fontsize)
    ax.set_xlabel('Time', fontsize=fontsize)
    ax.legend(fontsize=fontsize)
    ax.yaxis.set_tick_params(labelsize=labelsize)
    ax.xaxis.set_tick_params(labelsize=labelsize)
    figs.append(fig)
    # Evaluate the SLR polynomials (alpha, beta) on the unit circle.
    omega = np.linspace(omega_range[0], omega_range[1], m)
    psi_z = np.exp(-1j * np.outer(omega, np.arange(n)))
    a, b = transform.forward_slr(pulse)
    alpha = psi_z @ a
    beta = psi_z @ b
    if ptype == 'se':
        # Spin-echo refocusing profile.
        m_xy = beta**2
        m_xy *= np.exp(1j * omega * (n - 1))
        fig, ax = plt.subplots()
        ax.set_title(r'$M_{\mathrm{xy}}$')
        ax.set_xlabel(r'$\omega$ [radian]')
        ax.plot(omega, np.real(m_xy), label=r'$M_{\mathrm{x}}$', linewidth=linewidth)
        ax.plot(omega, np.imag(m_xy), label=r'$M_{\mathrm{y}}$', linewidth=linewidth)
        ax.legend(fontsize=fontsize)
        ax.yaxis.set_tick_params(labelsize=labelsize)
        ax.xaxis.set_tick_params(labelsize=labelsize)
        figs.append(fig)
    else:
        # Excitation profiles.
        m_xy = 2 * alpha.conjugate() * beta
        m_z = np.abs(alpha)**2 - np.abs(beta)**2
        if phase == 'linear':
            m_xy *= np.exp(1j * omega * n / 2)
        fig, ax = plt.subplots()
        ax.set_title(r'$|M_{\mathrm{xy}}|$', fontsize=fontsize)
        ax.set_xlabel(r'$\omega$ [radian]', fontsize=fontsize)
        ax.plot(omega, np.abs(m_xy), linewidth=linewidth)
        ax.yaxis.set_tick_params(labelsize=labelsize)
        ax.xaxis.set_tick_params(labelsize=labelsize)
        figs.append(fig)
        fig, ax = plt.subplots()
        ax.set_title(r'$\angle M_{\mathrm{xy}}$', fontsize=fontsize)
        ax.set_xlabel(r'$\omega$ [radian]', fontsize=fontsize)
        ax.plot(omega, np.angle(m_xy), linewidth=linewidth)
        ax.yaxis.set_tick_params(labelsize=labelsize)
        ax.xaxis.set_tick_params(labelsize=labelsize)
        figs.append(fig)
        fig, ax = plt.subplots()
        ax.set_title(r'$M_{\mathrm{z}}$', fontsize=fontsize)
        ax.set_xlabel(r'$\omega$ [radian]', fontsize=fontsize)
        ax.plot(omega, m_z, linewidth=linewidth)
        ax.yaxis.set_tick_params(labelsize=labelsize)
        ax.xaxis.set_tick_params(labelsize=labelsize)
        figs.append(fig)
    return figs
def plot_slr_pulses(pulse_slr, pulse_slfrank,
                    m=1000, ptype='ex', phase='linear',
                    omega_range=(-np.pi, np.pi),
                    fontsize='x-large', labelsize='large'):
    """Compare an SLR pulse and an SLfRank pulse in a 2x2 figure.

    Returns the matplotlib Figure.
    Fixed: `omega_range` was a mutable default list (now a tuple), and
    the final line carried trailing dataset-export junk that broke the
    syntax.
    NOTE(review): fontsize/labelsize are accepted but currently unused.
    """
    n = len(pulse_slr)
    fig, axs = plt.subplots(2, 2)
    axs[0][0].plot(pulse_slr.real,
                   linewidth=0.5,
                   label='SLR',
                   color='tab:orange')
    axs[0][0].plot(pulse_slfrank.real,
                   linewidth=0.5,
                   label='SLfRank',
                   color='tab:blue')
    axs[0][0].set_title(r'$B_{1}$')
    axs[0][0].set_xlabel('Time')
    axs[0][0].legend()
    # Evaluate both pulses' SLR polynomials on the unit circle.
    omega = np.linspace(omega_range[0], omega_range[1], m)
    psi_z = np.exp(-1j * np.outer(omega, np.arange(n)))
    a_slr, b_slr = transform.forward_slr(pulse_slr)
    alpha_slr = psi_z @ a_slr
    beta_slr = psi_z @ b_slr
    a_slfrank, b_slfrank = transform.forward_slr(pulse_slfrank)
    alpha_slfrank = psi_z @ a_slfrank
    beta_slfrank = psi_z @ b_slfrank
    if ptype == 'se':
        # Spin-echo profiles.
        m_xy_slr = beta_slr**2
        m_xy_slr *= np.exp(1j * omega * (n - 1))
        m_z_slr = 2 * np.imag(alpha_slr * beta_slr)
        m_xy_slfrank = beta_slfrank**2
        m_xy_slfrank *= np.exp(1j * omega * (n - 1))
        m_z_slfrank = 2 * np.imag(alpha_slfrank * beta_slfrank)
        axs[1][0].set_title(r'$M_{\mathrm{x}}$')
        axs[1][0].set_xlabel(r'$\omega$ [radian]')
        axs[1][0].plot(omega, np.real(m_xy_slr),
                       linewidth=0.5,
                       label=r'SLR',
                       color='tab:orange')
        axs[1][0].plot(omega, np.real(m_xy_slfrank),
                       linewidth=0.5,
                       label='SLfRank',
                       color='tab:blue')
        axs[1][1].set_title(r'$M_{\mathrm{y}}$')
        axs[1][1].set_xlabel(r'$\omega$ [radian]')
        axs[1][1].plot(omega, np.imag(m_xy_slr),
                       linewidth=0.5,
                       label=r'SLR',
                       color='tab:orange')
        axs[1][1].plot(omega, np.imag(m_xy_slfrank),
                       linewidth=0.5,
                       label='SLfRank',
                       color='tab:blue')
        axs[0][1].set_title(r'$M_{\mathrm{z}}$')
        axs[0][1].set_xlabel(r'$\omega$ [radian]')
        axs[0][1].plot(omega, m_z_slr,
                       linewidth=0.5,
                       label=r'SLR',
                       color='tab:orange')
        axs[0][1].plot(omega, m_z_slfrank,
                       linewidth=0.5,
                       label='SLfRank',
                       color='tab:blue')
    else:
        # Excitation profiles.
        m_xy_slr = 2 * alpha_slr.conjugate() * beta_slr
        m_z_slr = np.abs(alpha_slr)**2 - np.abs(beta_slr)**2
        m_xy_slfrank = 2 * alpha_slfrank.conjugate() * beta_slfrank
        m_z_slfrank = np.abs(alpha_slfrank)**2 - np.abs(beta_slfrank)**2
        if phase == 'linear':
            m_xy_slr *= np.exp(1j * omega * n / 2)
            m_xy_slfrank *= np.exp(1j * omega * n / 2)
        axs[1][0].set_title(r'$|M_{\mathrm{xy}}|$')
        axs[1][0].set_xlabel(r'$\omega$ [radian]')
        axs[1][0].plot(omega, np.abs(m_xy_slr),
                       linewidth=0.5,
                       label=r'SLR',
                       color='tab:orange')
        axs[1][0].plot(omega, np.abs(m_xy_slfrank),
                       linewidth=0.5,
                       label=r'SLfRank',
                       color='tab:blue')
        axs[1][1].set_title(r'$\angle M_{\mathrm{xy}}$')
        axs[1][1].set_xlabel(r'$\omega$ [radian]')
        axs[1][1].plot(omega, np.angle(m_xy_slr),
                       linewidth=0.5,
                       label=r'SLR',
                       color='tab:orange')
        axs[1][1].plot(omega, np.angle(m_xy_slfrank),
                       linewidth=0.5,
                       label=r'SLfRank',
                       color='tab:blue')
        axs[0][1].set_title(r'$M_{\mathrm{z}}$')
        axs[0][1].set_xlabel(r'$\omega$ [radian]')
        axs[0][1].plot(omega, m_z_slr,
                       linewidth=0.5,
                       label=r'SLR',
                       color='tab:orange')
        axs[0][1].plot(omega, m_z_slfrank,
                       linewidth=0.5,
                       label=r'SLfRank',
                       color='tab:blue')
    return fig
"""Command line options."""
import click
from renku.core.errors import RenkuException
from .git import set_git_isolation
def install_completion(ctx, attr, value):  # pragma: no cover
    """Install completion for the current shell.

    Click option callback: returns `value` unchanged when nothing should
    be installed, otherwise installs completion and exits the process.
    """
    # Imported lazily so the dependency is only needed when the flag is used.
    import click_completion.core
    if not value or ctx.resilient_parsing:
        return value
    shell, path = click_completion.core.install()
    click.secho("{0} completion installed in {1}".format(shell, path), fg="green")
    ctx.exit()
# --isolation flag: forwards its value to set_git_isolation via the callback.
option_isolation = click.option(
    "--isolation",
    is_flag=True,
    default=False,
    callback=lambda ctx, param, value: set_git_isolation(value),
    help="Set up the isolation for invoking of the given command.",
)
def check_siblings(graph, outputs):
    """Check that all outputs have their siblings listed."""
    all_siblings = set()
    for output in outputs:
        all_siblings.update(graph.siblings(output))
    sibling_paths = {sib.path for sib in all_siblings}
    output_paths = {output.path for output in outputs}
    # A sibling only counts as missing when it is not an output itself and
    # is not nested under any output's path.
    missing = {
        path
        for path in sibling_paths - output_paths
        if not any(path.startswith(output.path) for output in outputs)
    }
    if missing:
        listing = "\n\t".join(click.style(path, fg="red") for path in missing)
        hint = "Include the files above in the command or use the --with-siblings option."
        raise RenkuException(
            "There are missing output siblings:\n\n\t{0}\n\n{1}".format(listing, hint)
        )
    return outputs
def with_siblings(graph, outputs):
    """Include all missing siblings."""
    # Union of every output's sibling set (empty set when no outputs).
    return set().union(*(graph.siblings(node) for node in outputs))
# Flag pair writing to the same `siblings` destination; the chosen
# validation function itself is stored as the option's value.
option_check_siblings = click.option(
    "--check-siblings", "siblings", flag_value=check_siblings, default=True, help=check_siblings.__doc__,
)
option_with_siblings = click.option(
    "--with-siblings", "siblings", flag_value=with_siblings, default=True, help=with_siblings.__doc__,
)
def option_siblings(func):
    """Combine siblings options."""
    # Apply both sibling flags to the decorated command.
    return option_check_siblings(option_with_siblings(func))
# Toggle for using an external file storage service; `-S` is the short
# form of --no-external-storage.
# Fixed: the closing line previously carried trailing dataset-export junk
# that broke the syntax.
option_external_storage_requested = click.option(
    "external_storage_requested",
    "--external-storage/--no-external-storage",
    " /-S",
    is_flag=True,
    default=True,
    help="Use an external file storage service.",
)
import click
from renku.core.errors import RenkuException
from .git import set_git_isolation
def install_completion(ctx, attr, value):  # pragma: no cover
    """Install completion for the current shell.

    Click option callback: returns `value` unchanged when nothing should
    be installed, otherwise installs completion and exits the process.
    """
    # Imported lazily so the dependency is only needed when the flag is used.
    import click_completion.core
    if not value or ctx.resilient_parsing:
        return value
    shell, path = click_completion.core.install()
    click.secho("{0} completion installed in {1}".format(shell, path), fg="green")
    ctx.exit()
# --isolation flag: forwards its value to set_git_isolation via the callback.
option_isolation = click.option(
    "--isolation",
    is_flag=True,
    default=False,
    callback=lambda ctx, param, value: set_git_isolation(value),
    help="Set up the isolation for invoking of the given command.",
)
def check_siblings(graph, outputs):
    """Check that all outputs have their siblings listed."""
    siblings = set()
    for node in outputs:
        siblings |= graph.siblings(node)
    siblings = {node.path for node in siblings}
    missing = siblings - {node.path for node in outputs}
    # Siblings nested under an output's path are not considered missing.
    missing = {m for m in missing if all(not m.startswith(node.path) for node in outputs)}
    if missing:
        msg = "Include the files above in the command " "or use the --with-siblings option."
        raise RenkuException(
            "There are missing output siblings:\n\n"
            "\t{0}\n\n{1}".format("\n\t".join(click.style(path, fg="red") for path in missing), msg,),
        )
    return outputs
def with_siblings(graph, outputs):
    """Include all missing siblings."""
    siblings = set()
    for node in outputs:
        siblings |= graph.siblings(node)
    return siblings
# Flag pair writing to the same `siblings` destination.
option_check_siblings = click.option(
    "--check-siblings", "siblings", flag_value=check_siblings, default=True, help=check_siblings.__doc__,
)
option_with_siblings = click.option(
    "--with-siblings", "siblings", flag_value=with_siblings, default=True, help=with_siblings.__doc__,
)
def option_siblings(func):
    """Combine siblings options."""
    # Apply both sibling flags to the decorated command.
    return option_check_siblings(option_with_siblings(func))
# Toggle for using an external file storage service; `-S` is the short
# form of --no-external-storage.
# Fixed: the closing line previously carried trailing dataset-export junk
# that broke the syntax.
option_external_storage_requested = click.option(
    "external_storage_requested",
    "--external-storage/--no-external-storage",
    " /-S",
    is_flag=True,
    default=True,
    help="Use an external file storage service.",
)
import os
import tempfile
import time
import genomepy.utils
import pysam
import ananse.utils
# prep: shared output directory (tests/output) for all fixture files.
test_dir = os.path.dirname(os.path.dirname(__file__))
outdir = os.path.join(test_dir, "output")
genomepy.utils.mkdir_p(outdir)
def write_file(filename, lines):
    """Write ``lines`` to a text file, ensuring each ends with a newline."""
    with open(filename, "w") as fh:
        fh.writelines(
            line if line.endswith("\n") else line + "\n" for line in lines
        )
def write_bam(filename, lines):
    """Write SAM lines to a temporary file and convert it to a BAM file."""
    sam_path = os.path.join(outdir, "tmp.sam")
    write_file(sam_path, lines)
    # samtools view -b: convert SAM to BAM, writing to `filename`.
    pysam.view(sam_path, "-b", "-o", filename, catch_stdout=False)
    genomepy.utils.rm_rf(sam_path)
def compare_contents(file1, file2, ftype="bed"):
    """Return True when two BED (text) or BAM files have equal contents."""
    if ftype != "bed":
        # BAM files are binary; compare their decoded SAM records instead.
        return pysam.view(file1) == pysam.view(file2)
    with open(file1) as fh1:
        first = fh1.readlines()
    with open(file2) as fh2:
        second = fh2.readlines()
    return first == second
# test BED functions
# Fixtures: the same two regions in both orders, plus a disjoint third.
unsorted_bed = os.path.join(outdir, "unsorted.bed")
write_file(unsorted_bed, ["chr1\t817046\t817246\n", "chr1\t778558\t778758\n"])
sorted_bed = os.path.join(outdir, "sorted.bed")
write_file(sorted_bed, ["chr1\t778558\t778758\n", "chr1\t817046\t817246\n"])
second_bed = os.path.join(outdir, "second.bed")
write_file(second_bed, ["chr1\t827457\t827657\n"])
def test_bed_sort():
    """bed_sort must order BED regions in place."""
    assert not compare_contents(unsorted_bed, sorted_bed, ftype="bed")
    ananse.utils.bed_sort(unsorted_bed)
    assert compare_contents(unsorted_bed, sorted_bed, ftype="bed")
def test_bed_merge():
    """bed_merge must combine BED files without duplicating identical regions."""
    merged_bed = os.path.join(outdir, "merged.bed")
    # 1 bed = nothing changes
    ananse.utils.bed_merge([sorted_bed], merged_bed)
    assert compare_contents(sorted_bed, merged_bed, ftype="bed")
    # >1 bed, same content
    ananse.utils.bed_merge([unsorted_bed, sorted_bed], merged_bed)
    assert compare_contents(sorted_bed, merged_bed, ftype="bed")
    with open(merged_bed) as mb:
        assert len(mb.readlines()) == 2
    # >1 beds, different content
    ananse.utils.bed_merge([unsorted_bed, second_bed], merged_bed)
    with open(merged_bed) as mb:
        assert len(mb.readlines()) == 3
# test BAM functions
# Minimal SAM header plus three alignment records used to build BAM fixtures.
h0 = "@HD VN:1.6 SO:coordinate"
h1 = "@SQ SN:chr1 LN:50000"
line1 = (
    "read1 147 chr1 10003 40 11S90M = 10048 -46 "
    + "CCCTACCCTCTCCCTATCCCTAACCCTAACCCCAACCCTAACCCTATCCCCAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAA "
    + "A77--7-7---7-7---A77---AA7----<7-AAJAA-7JJFF<--F-A-AFFFF<FJJJJF-AFJF7F-JJJFJFFFJFF<FJJJJFJJFJJFFFFFAA "
)
line2 = (
    "read2 83 chr1 10004 30 2S45M1D54M = 10074 -30 "
    + "ATCCCTAACCCTAACCCTAACCCTAACCCTACCCCTACCCCTAACCCAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCT "
    + "--JAA7F-FAFA-7JJFA--F<7-FF<<FAF7<7F7A-FFAF7-FJJJFJJ----J<JFA-JAF7JFJFJF<<JFJF<JJJFFJJJAAAA-JFFFA-FAA- "
)
line3 = (
    "read3 163 chr1 10027 40 100M = 10032 105 "
    + "ACCCGAACCCTAACCCTAACCCTAACCCTAACCCGAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCAACCCTAACCCGAACCCA "
    + "AAFFFJJJJJJJJJJJFJJJFJJJFJFJJFJJJJ<-FJJFJAFFJA7AFAJJJJFJFJ-<F-AAJJ<FF7-J-AAJ--<JJJ--AAJ-77-AA-7A<-A- "
)
# The same two reads in both orders, plus a third read on its own.
unsorted_bam = os.path.join(outdir, "unsorted.bam")
write_bam(unsorted_bam, [h0, h1, line2, line1])
sorted_bam = os.path.join(outdir, "sorted.bam")
write_bam(sorted_bam, [h0, h1, line1, line2])
second_bam = os.path.join(outdir, "second.bam")
write_bam(second_bam, [h0, h1, line3])
def test_bam_index():
    """bam_index must create a .bai file and honour the force flag."""
    ncores = os.cpu_count()  # test max cores
    genomepy.utils.rm_rf(f"{sorted_bam}.bai")
    assert not os.path.exists(f"{sorted_bam}.bai")
    ananse.utils.bam_index(sorted_bam, ncore=ncores)
    assert os.path.exists(f"{sorted_bam}.bai")
    # test force
    t0 = os.path.getmtime(f"{sorted_bam}.bai")
    time.sleep(1)
    ananse.utils.bam_index(sorted_bam, force=False, ncore=ncores)
    t1 = os.path.getmtime(f"{sorted_bam}.bai")
    assert t0 == t1
    ananse.utils.bam_index(sorted_bam, force=True, ncore=ncores)
    t1 = os.path.getmtime(f"{sorted_bam}.bai")
    assert t0 != t1
def test_bam_sort():
    """bam_sort must sort the BAM in place and index the result."""
    ncores = -999  # test min cores
    assert not compare_contents(sorted_bam, unsorted_bam, ftype="bam")
    ananse.utils.bam_sort(unsorted_bam, ncore=ncores)
    assert compare_contents(sorted_bam, unsorted_bam, ftype="bam")
    assert os.path.exists(f"{unsorted_bam}.bai")  # bam is indexed
    # bam is identical to the already sorted bam
    ananse.utils.bam_index(sorted_bam, force=False)
    assert os.path.getsize(f"{unsorted_bam}.bai") == os.path.getsize(
        f"{sorted_bam}.bai"
    )
def test_bam_merge():
    """bam_merge must copy a single BAM and merge multiple BAMs."""
    ncores = min(2, os.cpu_count())  # test average cores
    merged_bam = os.path.join(outdir, "merged.bam")
    # 1 bam: copy
    ananse.utils.bam_merge([sorted_bam], merged_bam, ncore=ncores)
    assert compare_contents(sorted_bam, merged_bam, ftype="bam")
    assert os.path.getsize(f"{sorted_bam}.bai") == os.path.getsize(f"{merged_bam}.bai")
    # >1 bam: merge
    ananse.utils.bam_merge([sorted_bam, second_bam], merged_bam, ncore=ncores)
    l1 = pysam.view(sorted_bam).strip().split("\n")
    l2 = pysam.view(second_bam).strip().split("\n")
    l3 = pysam.view(merged_bam).strip().split("\n")
    assert len(l1) + len(l2) == len(l3) == 3
def test_mosdepth():
    """mosdepth coverage over the fixture regions must average 1.00."""
    bed_input = os.path.join(outdir, "mosdepth_input.bed")
    write_file(bed_input, ["chr1\t10003\t10203\n", "chr1\t10203\t10403\n"])
    # bam = sorted & indexed (required)
    bam_input = os.path.join(outdir, "mosdepth_input.bam")
    write_bam(bam_input, [h0, h1, line1, line2, line3])
    ananse.utils.bam_index(bam_input, ncore=os.cpu_count())
    bed_output = os.path.join(outdir, "mosdepth_output.bed")
    ananse.utils.mosdepth(bed_input, bam_input, bed_output, ncore=1)
    with open(bed_output) as f:
        # Column 4 of the mosdepth output holds the mean coverage.
        score = f.readlines()[0].strip().split("\t")[3]
    assert score == "1.00"
# test other functions
def test_cleanpath():
    """cleanpath must resolve relative segments and '~' to absolute paths."""
    path = "./tests/continuous_integration/test_02_utils.py"
    expected = __file__
    res = ananse.utils.cleanpath(path)
    assert res == expected
    path = "~/../.."
    expected = "/"
    res = ananse.utils.cleanpath(path)
    assert res == expected
def test_mytmpdir():
    """mytmpdir must create a directory under the system temp root."""
    tmpdir = ananse.utils.mytmpdir()
    assert os.path.exists(tmpdir)
    assert tempfile.gettempdir() in tmpdir
def test_clean_tmp():
    """clean_tmp must remove the directory created by mytmpdir.

    Fixed: the final assertion line previously carried trailing
    dataset-export junk that broke the syntax.
    """
    tmpdir = ananse.utils.mytmpdir()
    assert os.path.exists(tmpdir)
    ananse.utils.clean_tmp()
    assert not os.path.exists(tmpdir)
import tempfile
import time
import genomepy.utils
import pysam
import ananse.utils
# prep: shared output directory (tests/output) for all fixture files.
test_dir = os.path.dirname(os.path.dirname(__file__))
outdir = os.path.join(test_dir, "output")
genomepy.utils.mkdir_p(outdir)
def write_file(filename, lines):
    """Write ``lines`` to a text file, ensuring each ends with a newline."""
    with open(filename, "w") as f:
        for line in lines:
            if not line.endswith("\n"):
                line = line + "\n"
            f.write(line)
def write_bam(filename, lines):
    """Write SAM lines to a temporary file and convert it to a BAM file."""
    tmp_sam = os.path.join(outdir, "tmp.sam")
    write_file(tmp_sam, lines)
    pysam.view(tmp_sam, "-b", "-o", filename, catch_stdout=False)
    genomepy.utils.rm_rf(tmp_sam)
def compare_contents(file1, file2, ftype="bed"):
    """Return True when two BED (text) or BAM files have equal contents."""
    if ftype == "bed":
        with open(file1) as f:
            contents1 = f.readlines()
        with open(file2) as f:
            contents2 = f.readlines()
    else:
        # BAM files are binary; compare their decoded SAM records instead.
        contents1 = pysam.view(file1)
        contents2 = pysam.view(file2)
    return contents1 == contents2
# test BED functions
unsorted_bed = os.path.join(outdir, "unsorted.bed")
write_file(unsorted_bed, ["chr1\t817046\t817246\n", "chr1\t778558\t778758\n"])
sorted_bed = os.path.join(outdir, "sorted.bed")
write_file(sorted_bed, ["chr1\t778558\t778758\n", "chr1\t817046\t817246\n"])
second_bed = os.path.join(outdir, "second.bed")
write_file(second_bed, ["chr1\t827457\t827657\n"])
def test_bed_sort():
    """bed_sort must order BED regions in place."""
    assert not compare_contents(unsorted_bed, sorted_bed, ftype="bed")
    ananse.utils.bed_sort(unsorted_bed)
    assert compare_contents(unsorted_bed, sorted_bed, ftype="bed")
def test_bed_merge():
    """bed_merge must combine BED files without duplicating identical regions."""
    merged_bed = os.path.join(outdir, "merged.bed")
    # 1 bed = nothing changes
    ananse.utils.bed_merge([sorted_bed], merged_bed)
    assert compare_contents(sorted_bed, merged_bed, ftype="bed")
    # >1 bed, same content
    ananse.utils.bed_merge([unsorted_bed, sorted_bed], merged_bed)
    assert compare_contents(sorted_bed, merged_bed, ftype="bed")
    with open(merged_bed) as mb:
        assert len(mb.readlines()) == 2
    # >1 beds, different content
    ananse.utils.bed_merge([unsorted_bed, second_bed], merged_bed)
    with open(merged_bed) as mb:
        assert len(mb.readlines()) == 3
# test BAM functions
# Minimal SAM header plus three alignment records used to build BAM fixtures.
h0 = "@HD VN:1.6 SO:coordinate"
h1 = "@SQ SN:chr1 LN:50000"
line1 = (
    "read1 147 chr1 10003 40 11S90M = 10048 -46 "
    + "CCCTACCCTCTCCCTATCCCTAACCCTAACCCCAACCCTAACCCTATCCCCAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAA "
    + "A77--7-7---7-7---A77---AA7----<7-AAJAA-7JJFF<--F-A-AFFFF<FJJJJF-AFJF7F-JJJFJFFFJFF<FJJJJFJJFJJFFFFFAA "
)
line2 = (
    "read2 83 chr1 10004 30 2S45M1D54M = 10074 -30 "
    + "ATCCCTAACCCTAACCCTAACCCTAACCCTACCCCTACCCCTAACCCAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCT "
    + "--JAA7F-FAFA-7JJFA--F<7-FF<<FAF7<7F7A-FFAF7-FJJJFJJ----J<JFA-JAF7JFJFJF<<JFJF<JJJFFJJJAAAA-JFFFA-FAA- "
)
line3 = (
    "read3 163 chr1 10027 40 100M = 10032 105 "
    + "ACCCGAACCCTAACCCTAACCCTAACCCTAACCCGAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCAACCCTAACCCGAACCCA "
    + "AAFFFJJJJJJJJJJJFJJJFJJJFJFJJFJJJJ<-FJJFJAFFJA7AFAJJJJFJFJ-<F-AAJJ<FF7-J-AAJ--<JJJ--AAJ-77-AA-7A<-A- "
)
unsorted_bam = os.path.join(outdir, "unsorted.bam")
write_bam(unsorted_bam, [h0, h1, line2, line1])
sorted_bam = os.path.join(outdir, "sorted.bam")
write_bam(sorted_bam, [h0, h1, line1, line2])
second_bam = os.path.join(outdir, "second.bam")
write_bam(second_bam, [h0, h1, line3])
def test_bam_index():
    """bam_index must create a .bai file and honour the force flag."""
    ncores = os.cpu_count()  # test max cores
    genomepy.utils.rm_rf(f"{sorted_bam}.bai")
    assert not os.path.exists(f"{sorted_bam}.bai")
    ananse.utils.bam_index(sorted_bam, ncore=ncores)
    assert os.path.exists(f"{sorted_bam}.bai")
    # test force
    t0 = os.path.getmtime(f"{sorted_bam}.bai")
    time.sleep(1)
    ananse.utils.bam_index(sorted_bam, force=False, ncore=ncores)
    t1 = os.path.getmtime(f"{sorted_bam}.bai")
    assert t0 == t1
    ananse.utils.bam_index(sorted_bam, force=True, ncore=ncores)
    t1 = os.path.getmtime(f"{sorted_bam}.bai")
    assert t0 != t1
def test_bam_sort():
    """bam_sort must sort the BAM in place and index the result."""
    ncores = -999  # test min cores
    assert not compare_contents(sorted_bam, unsorted_bam, ftype="bam")
    ananse.utils.bam_sort(unsorted_bam, ncore=ncores)
    assert compare_contents(sorted_bam, unsorted_bam, ftype="bam")
    assert os.path.exists(f"{unsorted_bam}.bai")  # bam is indexed
    # bam is identical to the already sorted bam
    ananse.utils.bam_index(sorted_bam, force=False)
    assert os.path.getsize(f"{unsorted_bam}.bai") == os.path.getsize(
        f"{sorted_bam}.bai"
    )
def test_bam_merge():
    """bam_merge must copy a single BAM and merge multiple BAMs."""
    ncores = min(2, os.cpu_count())  # test average cores
    merged_bam = os.path.join(outdir, "merged.bam")
    # 1 bam: copy
    ananse.utils.bam_merge([sorted_bam], merged_bam, ncore=ncores)
    assert compare_contents(sorted_bam, merged_bam, ftype="bam")
    assert os.path.getsize(f"{sorted_bam}.bai") == os.path.getsize(f"{merged_bam}.bai")
    # >1 bam: merge
    ananse.utils.bam_merge([sorted_bam, second_bam], merged_bam, ncore=ncores)
    l1 = pysam.view(sorted_bam).strip().split("\n")
    l2 = pysam.view(second_bam).strip().split("\n")
    l3 = pysam.view(merged_bam).strip().split("\n")
    assert len(l1) + len(l2) == len(l3) == 3
def test_mosdepth():
    """mosdepth coverage over the fixture regions must average 1.00."""
    bed_input = os.path.join(outdir, "mosdepth_input.bed")
    write_file(bed_input, ["chr1\t10003\t10203\n", "chr1\t10203\t10403\n"])
    # bam = sorted & indexed (required)
    bam_input = os.path.join(outdir, "mosdepth_input.bam")
    write_bam(bam_input, [h0, h1, line1, line2, line3])
    ananse.utils.bam_index(bam_input, ncore=os.cpu_count())
    bed_output = os.path.join(outdir, "mosdepth_output.bed")
    ananse.utils.mosdepth(bed_input, bam_input, bed_output, ncore=1)
    with open(bed_output) as f:
        score = f.readlines()[0].strip().split("\t")[3]
    assert score == "1.00"
# test other functions
def test_cleanpath():
    """cleanpath must resolve relative segments and '~' to absolute paths."""
    path = "./tests/continuous_integration/test_02_utils.py"
    expected = __file__
    res = ananse.utils.cleanpath(path)
    assert res == expected
    path = "~/../.."
    expected = "/"
    res = ananse.utils.cleanpath(path)
    assert res == expected
def test_mytmpdir():
    """mytmpdir must create a directory under the system temp root."""
    tmpdir = ananse.utils.mytmpdir()
    assert os.path.exists(tmpdir)
    assert tempfile.gettempdir() in tmpdir
def test_clean_tmp():
    """clean_tmp must remove the directory created by mytmpdir.

    Fixed: the final assertion line previously carried trailing
    dataset-export junk that broke the syntax.
    """
    tmpdir = ananse.utils.mytmpdir()
    assert os.path.exists(tmpdir)
    ananse.utils.clean_tmp()
    assert not os.path.exists(tmpdir)
from unittest.mock import Mock
import pytest
from fastapi import HTTPException, status
from tests.unit.time_manager.helpers import get_token_from_user
from time_manager.db.sql.user import User
from time_manager.routers.common import get_user_dao, oauth2_scheme
from time_manager.schemas.user import UserDB
from time_manager.utils.auth import get_password_hash
# Fixture user whose token represents an authenticated session.
authorized_user = UserDB(id=1, username="AAA", hashed_password="a", email="a<EMAIL>")
@pytest.mark.parametrize(
    "oauth2_result, dao_result, expected_status",
    [
        (
            HTTPException(status_code=status.HTTP_401_UNAUTHORIZED),
            [],
            status.HTTP_401_UNAUTHORIZED,
        ),
        (
            get_token_from_user(user=authorized_user),
            [User(id=1, username="QWERTY", hashed_password="<PASSWORD>", email="a<EMAIL>")],
            status.HTTP_200_OK,
        ),
        ("", [], status.HTTP_401_UNAUTHORIZED),
        (
            get_token_from_user(user=authorized_user),
            [],
            status.HTTP_401_UNAUTHORIZED,
        ),
    ],
)
def test_logout(oauth2_result, dao_result, expected_status, client, app):
    """DELETE /logout must clear the auth cookie and return the expected status."""
    mock_oath2_scheme = Mock()
    oauth2_throw = False
    if isinstance(oauth2_result, Exception):
        oauth2_throw = True
        mock_oath2_scheme.side_effect = oauth2_result
    else:
        mock_oath2_scheme.return_value = oauth2_result
    app.dependency_overrides[oauth2_scheme] = lambda: mock_oath2_scheme()
    dao = Mock()
    dao.filter.return_value = iter(dao_result)
    app.dependency_overrides[get_user_dao] = lambda: dao
    response = client.delete("/logout")
    assert mock_oath2_scheme.call_count == 1
    # dao.filter is called only when the token is set in either
    # a header or a cookie.
    # Fixed: the original `assert x == 0 if cond else 1` parsed as
    # `(x == 0) if cond else 1`, which is vacuously true whenever the
    # OAuth2 dependency does not raise.
    # NOTE(review): expected counts follow the comment above (no call for
    # a raised dependency or an empty token) — confirm against the endpoint.
    expected_filter_calls = 1 if oauth2_result and not oauth2_throw else 0
    assert dao.filter.call_count == expected_filter_calls
    assert response.status_code == expected_status
    assert response.cookies.get("Authorization", default=None) is None
@pytest.mark.parametrize(
    "credentials, expected_result, expected_status",
    [
        # Misspelled "password" field -> request body validation fails.
        ({"username": "u1", "assword": "p1"}, [], status.HTTP_422_UNPROCESSABLE_ENTITY),
        # Unknown user -> unauthorized.
        ({"username": "u1", "password": "p1"}, [], status.HTTP_401_UNAUTHORIZED),
        # Known user whose stored hash matches the password -> login succeeds.
        (
            {"username": "u1", "password": "p1"},
            [
                User(
                    id=1,
                    username="u1",
                    hashed_password=get_password_hash("p1"),
                    email="a@a.a",
                )
            ],
            status.HTTP_200_OK,
        ),
    ],
)
def test_login(credentials, expected_result, expected_status, unauthorized_client, app):
    """POST /login must authenticate the user and set a Bearer cookie on success."""
    dao = Mock()
    dao.filter.return_value = iter(expected_result)
    app.dependency_overrides[get_user_dao] = lambda: dao

    response = unauthorized_client.post("/login", json=credentials)

    assert response.status_code == expected_status
    # NOTE(review): `dao` itself is never called (only `dao.filter` is), so
    # dao.call_count is always 0 and the guarded assertion below never runs.
    # `dao.filter.call_count` was probably intended — confirm before changing.
    assert dao.call_count <= 1
    if dao.call_count:
        assert dao.call_args.args[0].dict() == credentials
    if expected_status == status.HTTP_200_OK:
        assert response.cookies.get("Authorization", default=None) is not None
        assert "Bearer" in response.cookies["Authorization"]
import pytest
from fastapi import HTTPException, status
from tests.unit.time_manager.helpers import get_token_from_user
from time_manager.db.sql.user import User
from time_manager.routers.common import get_user_dao, oauth2_scheme
from time_manager.schemas.user import UserDB
from time_manager.utils.auth import get_password_hash
authorized_user = UserDB(id=1, username="AAA", hashed_password="a", email="a<EMAIL>")
@pytest.mark.parametrize(
"oauth2_result, dao_result, expected_status",
[
(
HTTPException(status_code=status.HTTP_401_UNAUTHORIZED),
[],
status.HTTP_401_UNAUTHORIZED,
),
(
get_token_from_user(user=authorized_user),
[User(id=1, username="QWERTY", hashed_password="<PASSWORD>", email="a<EMAIL>")],
status.HTTP_200_OK,
),
("", [], status.HTTP_401_UNAUTHORIZED),
(
get_token_from_user(user=authorized_user),
[],
status.HTTP_401_UNAUTHORIZED,
),
],
)
def test_logout(oauth2_result, dao_result, expected_status, client, app):
mock_oath2_scheme = Mock()
oauth2_throw = False
if isinstance(oauth2_result, Exception):
oauth2_throw = True
mock_oath2_scheme.side_effect = oauth2_result
else:
mock_oath2_scheme.return_value = oauth2_result
app.dependency_overrides[oauth2_scheme] = lambda: mock_oath2_scheme()
dao = Mock()
dao.filter.return_value = iter(dao_result)
app.dependency_overrides[get_user_dao] = lambda: dao
response = client.delete("/logout")
assert mock_oath2_scheme.call_count == 1
# dao.filter is called only when the token is set in either
# a header or a cookie
assert dao.filter.call_count == 0 if oauth2_throw else 1
assert response.status_code == expected_status
assert response.cookies.get("Authorization", default=None) is None
@pytest.mark.parametrize(
"credentials, expected_result, expected_status",
[
({"username": "u1", "assword": "p1"}, [], status.HTTP_422_UNPROCESSABLE_ENTITY),
({"username": "u1", "password": "p1"}, [], status.HTTP_401_UNAUTHORIZED),
(
{"username": "u1", "password": "p1"},
[
User(
id=1,
username="u1",
hashed_password=get_password_hash("p1"),
email="a@a.a",
)
],
status.HTTP_200_OK,
),
],
)
def test_login(credentials, expected_result, expected_status, unauthorized_client, app):
dao = Mock()
dao.filter.return_value = iter(expected_result)
app.dependency_overrides[get_user_dao] = lambda: dao
response = unauthorized_client.post("/login", json=credentials)
assert response.status_code == expected_status
assert dao.call_count <= 1
if dao.call_count:
assert dao.call_args.args[0].dict() == credentials
if expected_status == status.HTTP_200_OK:
assert response.cookies.get("Authorization", default=None) is not None
assert "Bearer" in response.cookies["Authorization"] | 0.707607 | 0.298095 |
"""Stepwise LightGBM hyper-parameter grid search on the oversampled
AllData_v4 feature set, followed by final training and prediction.

Each "Step" tunes one parameter group via cross-validation and feeds the
best value forward into the next step (coordinate-descent style search).
"""
import pandas as pd
import lightgbm as lgb
from sklearn.metrics import roc_auc_score
from Utils import Profiler
from IPython.display import display
from Estimators import LGBM

profile = Profiler()
profile.Start()

print("Loading resampled train data")
train_X = pd.read_csv("../input/AllData_v4_os.train")
train_X.pop("Unnamed: 0")  # drop the stray index column written by to_csv
print("Loading resampled train labels")
train_y = pd.read_csv("../input/AllData_v4_os.label")
train_y = train_y.pop("TARGET")
print("Loading resampled validation data")
valid_X = pd.read_csv("../input/AllData_v4_os_valid.train")
valid_X.pop("Unnamed: 0")
print("Loading resampled validation labels")
valid_y = pd.read_csv("../input/AllData_v4_os_valid.label")
valid_y = valid_y.pop("TARGET")
print("Loading application test data")
test_X = pd.read_csv("../input/AllData_v4.test")

print("train_y shape: " + str(train_y.shape))
print("train_X shape: " + str(train_X.shape))
print("valid_y shape: " + str(valid_y.shape))
print("valid_X shape: " + str(valid_X.shape))
print("test_X shape: " + str(test_X.shape))

lgb_train = lgb.Dataset(train_X, train_y)
# NOTE(review): lgb_test is never used below — confirm before removing.
lgb_test = lgb.Dataset(valid_X)

# Define estimator parameters (starting point for the search)
params = {'task': 'train',
          'objective': 'binary',
          'learning_rate': 0.1,
          'num_leaves': 31,
          'max_depth': 8,
          'min_data_in_leaf': 20,
          'min_sum_hessian_in_leaf': 0.001,
          'lambda_l1': 0,
          'lambda_l2': 0,
          'scale_pos_weight': 1,
          'metric': 'auc',
          'verbose': -1}

# Parameters that are to be supplied to cross-validation
cv_params = {
    "train_set": lgb_train,
    "num_boost_round": 10000,
    "nfold": 5,
    "early_stopping_rounds": 50,
    "verbose_eval": 10
}

# Step 1: tree complexity
print("Performing Gridsearch Step-1")
param_grid = {"num_leaves": range(10, 101, 10)}
lgbm = LGBM(params)
gs_results, params = lgbm.gridsearch(param_grid, cv_params)
gs_summary = gs_results

# Step 2: depth limit
print("Performing Gridsearch Step-2")
param_grid = {"max_depth": range(3, 10, 1)}
lgbm = LGBM(params)
gs_results, params = lgbm.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)

# Step 3: minimum leaf size
print("Performing Gridsearch Step-3")
param_grid = {"min_data_in_leaf": range(10, 81, 10)}
lgbm = LGBM(params)
gs_results, params = lgbm.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)

# Step 4: L1 regularisation
print("Performing Gridsearch Step-4")
param_grid = {"lambda_l1": [i / 10.0 for i in range(0, 8)]}
lgbm = LGBM(params)
gs_results, params = lgbm.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)

# Step 5: L2 regularisation
print("Performing Gridsearch Step-5")
param_grid = {"lambda_l2": [i / 10.0 for i in range(0, 8)]}
lgbm = LGBM(params)
gs_results, params = lgbm.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)

# Step 6: class imbalance weighting (92/8 mirrors the raw positive rate)
print("Performing Gridsearch Step-6")
param_grid = {"scale_pos_weight": [92/8, 1]}
lgbm = LGBM(params)
gs_results, params = lgbm.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)

# Step 7: learning rate
print("Performing Gridsearch Step-7")
param_grid = {"learning_rate": [0.01, 0.02, 0.03, 0.05, 0.08, 0.1]}
lgbm = LGBM(params)
gs_results, params = lgbm.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)

print('All Iterations')
display(gs_summary)
print('Best parameters: ')
# NOTE(review): only the last step's results are searched for the best row;
# earlier steps are already folded into `params` — confirm this is intended.
best_cv = gs_results.loc[gs_results['result'].idxmax()]
display(best_cv)

profile.End()
print('Time elapsed: %s mins' % str(profile.ElapsedMinutes))

# Save CV process
gs_summary.to_csv('../AllData_v4_OS_LGBM_GS.csv')

# Generate model by best iteration
print("Model training started...")
model = lgb.train(params=params,
                  train_set=lgb_train,
                  # scale the CV best iteration up by 1/0.8 — presumably to
                  # compensate for fitting on all 5 folds instead of 4; confirm.
                  num_boost_round=int(best_cv[1] / 0.8),
                  verbose_eval=1)
print("Model training completed...")

# Save model for possible coded ensemble
model.save_model('GridSearch/AllData_v4_OS_LGBM_Model', num_iteration=best_cv[1])

print("Predicting validation set...")
valid_preds = model.predict(valid_X)
print("Validation set prediction completed...")
print("Predicting test set...")
test_preds = model.predict(test_X)
print("Test set prediction completed...")

auc = roc_auc_score(valid_y, valid_preds)
print("Validation AUC: " + str(auc))

valid_preds = pd.DataFrame(valid_preds)
valid_preds.to_csv("GridSearch/AllData_v4_OS_GS_LGBM_ValidPreds.csv", index=False)

sub = pd.read_csv('../input/sample_submission.csv')
sub['TARGET'] = test_preds
sub.to_csv('GridSearch/AllData_v4_OS_GS_LGBM_Preds.csv', index=False)
import lightgbm as lgb
from sklearn.metrics import roc_auc_score
from Utils import Profiler
from IPython.display import display
from Estimators import LGBM
profile = Profiler()
profile.Start()
print("Loading resampled train data")
train_X = pd.read_csv("../input/AllData_v4_os.train")
train_X.pop("Unnamed: 0")
print("Loading resampled train labels")
train_y = pd.read_csv("../input/AllData_v4_os.label")
train_y = train_y.pop("TARGET")
print("Loading resampled validation data")
valid_X = pd.read_csv("../input/AllData_v4_os_valid.train")
valid_X.pop("Unnamed: 0")
print("Loading resampled validation labels")
valid_y = pd.read_csv("../input/AllData_v4_os_valid.label")
valid_y = valid_y.pop("TARGET")
print("Loading application test data")
test_X = pd.read_csv("../input/AllData_v4.test")
print("train_y shape: " + str(train_y.shape))
print("train_X shape: " + str(train_X.shape))
print("valid_y shape: " + str(valid_y.shape))
print("valid_X shape: " + str(valid_X.shape))
print("test_X shape: " + str(test_X.shape))
lgb_train = lgb.Dataset(train_X, train_y)
lgb_test = lgb.Dataset(valid_X)
# Define estimator parameters
params = {'task' :'train',
'objective' :'binary',
'learning_rate' :0.1,
'num_leaves' :31,
'max_depth' :8,
'min_data_in_leaf' :20,
'min_sum_hessian_in_leaf' :0.001,
'lambda_l1' :0,
'lambda_l2' :0,
'scale_pos_weight' :1,
'metric' :'auc',
'verbose' :-1}
# Parameters that are to be supplied to cross-validation
cv_params = {
"train_set" : lgb_train,
"num_boost_round" : 10000,
"nfold" : 5,
"early_stopping_rounds" : 50,
"verbose_eval" : 10
}
# Step 1
print("Performing Gridsearch Step-1")
param_grid = {"num_leaves" : range(10,101,10)}
lgbm = LGBM(params)
gs_results, params = lgbm.gridsearch(param_grid, cv_params)
gs_summary = gs_results
# Step 2
print("Performing Gridsearch Step-2")
param_grid = {"max_depth" : range(3,10,1)}
lgbm = LGBM(params)
gs_results, params = lgbm.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)
# Step 3
print("Performing Gridsearch Step-3")
param_grid = {"min_data_in_leaf" : range(10,81,10)}
lgbm = LGBM(params)
gs_results, params = lgbm.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)
# Step 4
print("Performing Gridsearch Step-4")
param_grid = {"lambda_l1" : [i/10.0 for i in range(0,8)]}
lgbm = LGBM(params)
gs_results, params = lgbm.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)
# Step 5
print("Performing Gridsearch Step-5")
param_grid = {"lambda_l2" : [i/10.0 for i in range(0,8)]}
lgbm = LGBM(params)
gs_results, params = lgbm.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)
# Step 6
print("Performing Gridsearch Step-6")
param_grid = {"scale_pos_weight" : [92/8, 1]}
lgbm = LGBM(params)
gs_results, params = lgbm.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)
# Step 7
print("Performing Gridsearch Step-7")
param_grid = {"learning_rate" : [0.01,0.02, 0.03,0.05,0.08,0.1]}
lgbm = LGBM(params)
gs_results, params = lgbm.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)
print('All Iterations')
display(gs_summary)
print('Best parameters: ')
best_cv = gs_results.loc[gs_results['result'].idxmax()]
display(best_cv)
profile.End()
print('Time elapsed: %s mins' %str(profile.ElapsedMinutes))
# Save CV process
gs_summary.to_csv('../AllData_v4_OS_LGBM_GS.csv')
# Generate model by best iteration
print("Model training started...")
model = lgb.train(params=params,
train_set=lgb_train,
num_boost_round=int(best_cv[1]/0.8),
verbose_eval=1)
print("Model training completed...")
# Save model for possible coded ensemble
model.save_model('GridSearch/AllData_v4_OS_LGBM_Model', num_iteration=best_cv[1])
print("Predicting validation set...")
valid_preds = model.predict(valid_X)
print("Validation set prediction completed...")
print("Predicting test set...")
test_preds = model.predict(test_X)
print("Test set prediction completed...")
auc = roc_auc_score(valid_y, valid_preds)
print("Validation AUC: " + str(auc))
valid_preds = pd.DataFrame(valid_preds)
valid_preds.to_csv("GridSearch/AllData_v4_OS_GS_LGBM_ValidPreds.csv", index=False)
sub = pd.read_csv('../input/sample_submission.csv')
sub['TARGET'] = test_preds
sub.to_csv('GridSearch/AllData_v4_OS_GS_LGBM_Preds.csv', index=False) | 0.331228 | 0.141489 |
import unittest, sys
from PyQt4 import QtCore, QtGui
from datafinder.core.search_restriction import SearchRestrictionParser
from datafinder.gui.user.dialogs.search_dialog.search_query_editor import SearchQueryEditor, SearchQueryAnalyzer
__version__ = "$Revision-Id:$"
class SearchQueryEditorTestCase(unittest.TestCase):
    """
    Tests the auto completion text edit module.
    """

    _availableProperties = ["Content", "Date time", "Content type descriptor"]
    _availableComparisonOperators = ["=", "<", ">", ">=", "<=", "is"]
    _availableConjuntionsOperators = ["AND", "OR"]
    # A single QApplication must exist before any widget can be constructed.
    _application = QtGui.QApplication(sys.argv)

    def setUp(self):
        """ Setups the test fixture: an editor with one completer per token type. """
        self.autoComplEdit = SearchQueryEditor(None)
        self.autoComplEdit.registerCompleter(QtGui.QCompleter(self._availableProperties),
                                             SearchQueryAnalyzer.PROPERTY_TYPE)
        completer = QtGui.QCompleter(self._availableComparisonOperators)
        completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.autoComplEdit.registerCompleter(completer, SearchQueryAnalyzer.COMPARISON_TYPE)
        completer = QtGui.QCompleter(self._availableConjuntionsOperators)
        completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.autoComplEdit.registerCompleter(completer, SearchQueryAnalyzer.CONJUNCTION_TYPE)
        self.autoComplEdit.registerCompleter(QtGui.QCompleter(["''"]),
                                             SearchQueryAnalyzer.LITERAL_TYPE)
        self._searchQueryAnalyzer = SearchQueryAnalyzer(SearchRestrictionParser(), dict())
        self.autoComplEdit._searchQueryAnalyzer = self._searchQueryAnalyzer

    def testPropertyCompletion(self):
        """ Tests auto completion for property names. """
        self.autoComplEdit.setText("Con")
        self._requestAutocompletionAtPosition(3)
        self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "Content")

        # No property starts with "Conz" -> no completion offered.
        self.autoComplEdit.setText("Conz")
        self._requestAutocompletionAtPosition(4)
        self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "")

    def _requestAutocompletionAtPosition(self, position):
        """ Sets the cursor position and triggers completion via Ctrl+Space. """
        textCursor = self.autoComplEdit.textCursor()
        textCursor.setPosition(position)
        self.autoComplEdit.setTextCursor(textCursor)
        self._searchQueryAnalyzer.analyze(unicode(self.autoComplEdit.toPlainText()))
        self.autoComplEdit.keyPressEvent(QtGui.QKeyEvent(QtCore.QEvent.KeyPress, QtCore.Qt.Key_Space, QtCore.Qt.ControlModifier))

    def testConjunctionCompletion(self):
        """ Tests the completion of conjunction operators. """
        self.autoComplEdit.setText("Content = 'tada' ")
        self._requestAutocompletionAtPosition(17)
        self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "AND")

    def testComparisonCompletion(self):
        """ Tests the completion of comparison operators. """
        self.autoComplEdit.setText("Content >")
        self._requestAutocompletionAtPosition(9)
        # Collect every completion the completer offers for the ">" prefix.
        completions = list()
        i = 0
        while self.autoComplEdit.completer().setCurrentRow(i):
            completions.append(self.autoComplEdit.completer().currentCompletion())
            i += 1
        self.assertEquals(completions, [">", ">="])

    def testPropertyCompletionAfterConjunction(self):
        """ Tests auto completion for property names after a conjunction. """
        self.autoComplEdit.setText("Content = 'tada' and C")
        self._requestAutocompletionAtPosition(22)
        self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "Content")

    def testMultipleCompletion(self):
        """ Tests the multiple use of auto completion in one query. """
        self.autoComplEdit.setText("")
        self._requestAutocompletionAtPosition(0)
        self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
        self.assertEquals(self.autoComplEdit.toPlainText(), "Content ")

        self._requestAutocompletionAtPosition(7)
        self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
        self.assertEquals(self.autoComplEdit.toPlainText(), "Content ")

        self._requestAutocompletionAtPosition(8)
        self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
        self.assertEquals(self.autoComplEdit.toPlainText(), "Content = ")

        self._requestAutocompletionAtPosition(10)
        self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
        self.assertEquals(self.autoComplEdit.toPlainText(), "Content = '' ")

        # Completing inside already-complete text must not alter it.
        self._requestAutocompletionAtPosition(0)
        self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
        self.assertEquals(self.autoComplEdit.toPlainText(), "Content = '' ")

        self._requestAutocompletionAtPosition(4)
        self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "Content")
        self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
        self.assertEquals(self.autoComplEdit.toPlainText(), "Content = '' ")

        self.autoComplEdit.setText("Content = '' ")
        self._requestAutocompletionAtPosition(8)
        self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
        self.assertEquals(self.autoComplEdit.toPlainText(), "Content = = '' ")

    def testConjunctionRecognition(self):
        """ Tests the recognition of conjunction terms when already a character is typed. """
        self.autoComplEdit.setText("Content = 'Peter hhg' o")
        self._requestAutocompletionAtPosition(23)
        self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "OR")

    def testConjunctionRecognitionWithNoTokenUnderCursor(self):
        """ Tests the recognition of conjunction terms with no token under the cursor. """
        self.autoComplEdit.setText("Content = 'Peter hhg' AND Content = '<NAME>hg'")
        self._requestAutocompletionAtPosition(22)
        self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "AND")

    def testConjunctionRecognitionWithTokenUnderCursor(self):
        """ Tests the recognition of conjunction terms with token under the cursor. """
        self.autoComplEdit.setText("Content = 'Peter hhg' AND NOT Content = 'Peter hhg'")
        self._requestAutocompletionAtPosition(24)
        self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "AND")
from PyQt4 import QtCore, QtGui
from datafinder.core.search_restriction import SearchRestrictionParser
from datafinder.gui.user.dialogs.search_dialog.search_query_editor import SearchQueryEditor, SearchQueryAnalyzer
__version__ = "$Revision-Id:$"
class SearchQueryEditorTestCase(unittest.TestCase):
"""
Tests the auto completion text edit module.
"""
_availableProperties = ["Content", "Date time", "Content type descriptor"]
_availableComparisonOperators = ["=", "<", ">", ">=", "<=", "is"]
_availableConjuntionsOperators = ["AND", "OR"]
_application = QtGui.QApplication(sys.argv)
def setUp(self):
""" Setups the test fixture. """
self.autoComplEdit = SearchQueryEditor(None)
self.autoComplEdit.registerCompleter(QtGui.QCompleter(self._availableProperties),
SearchQueryAnalyzer.PROPERTY_TYPE)
completer = QtGui.QCompleter(self._availableComparisonOperators)
completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.autoComplEdit.registerCompleter(completer, SearchQueryAnalyzer.COMPARISON_TYPE)
completer = QtGui.QCompleter(self._availableConjuntionsOperators)
completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.autoComplEdit.registerCompleter(completer, SearchQueryAnalyzer.CONJUNCTION_TYPE)
self.autoComplEdit.registerCompleter(QtGui.QCompleter(["''"]),
SearchQueryAnalyzer.LITERAL_TYPE)
self._searchQueryAnalyzer = SearchQueryAnalyzer(SearchRestrictionParser(), dict())
self.autoComplEdit._searchQueryAnalyzer = self._searchQueryAnalyzer
def testPropertyCompletion(self):
""" Tests auto completion for property names. """
self.autoComplEdit.setText("Con")
self._requestAutocompletionAtPosition(3)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "Content")
self.autoComplEdit.setText("Conz")
self._requestAutocompletionAtPosition(4)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "")
def _requestAutocompletionAtPosition(self, position):
""" Sets the cursor position in the text editor. """
textCursor = self.autoComplEdit.textCursor()
textCursor.setPosition(position)
self.autoComplEdit.setTextCursor(textCursor)
self._searchQueryAnalyzer.analyze(unicode(self.autoComplEdit.toPlainText()))
self.autoComplEdit.keyPressEvent(QtGui.QKeyEvent(QtCore.QEvent.KeyPress, QtCore.Qt.Key_Space, QtCore.Qt.ControlModifier))
def testConjunctionCompletion(self):
""" Tests the completion of conjunction operators. """
self.autoComplEdit.setText("Content = 'tada' ")
self._requestAutocompletionAtPosition(17)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "AND")
def testComparisonCompletion(self):
""" Tests the completion of comparison operators. """
self.autoComplEdit.setText("Content >")
self._requestAutocompletionAtPosition(9)
completions = list()
i = 0
while self.autoComplEdit.completer().setCurrentRow(i):
completions.append(self.autoComplEdit.completer().currentCompletion())
i += 1
self.assertEquals(completions, [">", ">="])
def testPropertyCompletionAfterConjunction(self):
""" Tests auto completion for property names after a conjunction. """
self.autoComplEdit.setText("Content = 'tada' and C")
self._requestAutocompletionAtPosition(22)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "Content")
def testMultipleCompletion(self):
""" Tests the multiple use of auto completion in one query. """
self.autoComplEdit.setText("")
self._requestAutocompletionAtPosition(0)
self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
self.assertEquals(self.autoComplEdit.toPlainText(), "Content ")
self._requestAutocompletionAtPosition(7)
self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
self.assertEquals(self.autoComplEdit.toPlainText(), "Content ")
self._requestAutocompletionAtPosition(8)
self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
self.assertEquals(self.autoComplEdit.toPlainText(), "Content = ")
self._requestAutocompletionAtPosition(10)
self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
self.assertEquals(self.autoComplEdit.toPlainText(), "Content = '' ")
self._requestAutocompletionAtPosition(0)
self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
self.assertEquals(self.autoComplEdit.toPlainText(), "Content = '' ")
self._requestAutocompletionAtPosition(4)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "Content")
self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
self.assertEquals(self.autoComplEdit.toPlainText(), "Content = '' ")
self.autoComplEdit.setText("Content = '' ")
self._requestAutocompletionAtPosition(8)
self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
self.assertEquals(self.autoComplEdit.toPlainText(), "Content = = '' ")
def testConjunctionRecognition(self):
""" Tests the recognition of conjunction terms when already a character is typed. """
self.autoComplEdit.setText("Content = 'Peter hhg' o")
self._requestAutocompletionAtPosition(23)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "OR")
def testConjunctionRecognitionWithNoTokenUnderCursor(self):
""" Tests the recognition of conjunction terms with no token under the cursor. """
self.autoComplEdit.setText("Content = 'Peter hhg' AND Content = '<NAME>hg'")
self._requestAutocompletionAtPosition(22)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "AND")
def testConjunctionRecognitionWithTokenUnderCursor(self):
""" Tests the recognition of conjunction terms with token under the cursor. """
self.autoComplEdit.setText("Content = 'Peter hhg' AND NOT Content = 'Peter hhg'")
self._requestAutocompletionAtPosition(24)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "AND") | 0.502686 | 0.203727 |
from unittest import TestCase,mock
from http import HTTPStatus
from gw_proxy._to_sync.anish_agarwal.API_SaaS_VPS_Client import API_SaaS_VPS_Client
class test_Rest_API_SaaS_VPs(TestCase):
    """Tests API_SaaS_VPS_Client's handling of brotli, gzip and plain responses."""

    @staticmethod
    def _make_event(headers):
        """Build a minimal API-Gateway-style GET event for https://google.com/.

        The upstream response headers double as the request headers here,
        mirroring the original fixture setup.
        """
        domain_prefix = 'https://google.com'
        request_context = {
            "resourcePath": "/",
            "httpMethod": "GET",
            "path": "/",
            "protocol": "HTTP/1.1",
            "domainPrefix": domain_prefix,
            "domainName": domain_prefix,
        }
        return {
            "body": {},
            "path": "/",
            "headers": headers,
            "httpMethod": "GET",
            "requestContext": request_context,
        }

    @staticmethod
    def _mock_response(mockget, headers):
        """Wire the patched requests.get to return a Mock carrying *headers*."""
        response = mock.Mock()
        response.headers = headers
        mockget.return_value = response
        return response

    @mock.patch('requests.get')
    def test_proxy_for_brotli_encoded_sites(self, mockget):
        """Brotli bodies are passed through base64-encoded with Content-Encoding kept."""
        headers = {
            "Date": "Sun, 01 Mar 2020 11:22:05 GMT",
            "Expires": "-1",
            "Cache-Control": "private, max-age=0",
            "Content-Type": "text/html; charset=UTF-8",
            "Strict-Transport-Security": "max-age=31536000",
            "Accept-CH": "DPR",
            "Accept-CH-Lifetime": "2592000",
            "P3P": "CP=\"This is not a P3P policy! See g.co/p3phelp for more info.\"",
            "Content-Encoding": "br",
            "Content-Length": "65251",
            "X-XSS-Protection": "0",
            "X-Frame-Options": "SAMEORIGIN",
            "Set-Cookie": "1P_JAR=2020-03-01-11; expires=Tue, 31-Mar-2020 11:22:06 GMT; path=/; domain=.google.com; Secure; SameSite=none, NID=199=UsFpcFnQ21COTv9q0Scd3ZUVZBDiHmy0Wts3igOy3v8iHYmlDnv7PbiF_JyecNwwWTUlzjNfp6-b50Igyf0c9CbkirOK9azAy6HWh1TLzCTSUJHbw6_tfZexErwcYNu1F9fXeIOUDJWUrC21DVSJsWg1cCpPrc9d71IbO-9X1dE; expires=Mon, 31-Aug-2020 11:22:05 GMT; path=/; domain=.google.com; Secure; HttpOnly; SameSite=none",
            "Alt-Svc": "quic=\":443\"; ma=2592000; v=\"46,43\",h3-Q050=\":443\"; ma=2592000,h3-Q049=\":443\"; ma=2592000,h3-Q048=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000"
        }
        response = self._mock_response(mockget, headers)
        response.content = b'some-test-bytes-string'

        api_saas_vps_client = API_SaaS_VPS_Client(self._make_event(headers))
        result = api_saas_vps_client.request_get()

        assert result['isBase64Encoded'] == True
        # Brotli is left as-is for the browser to decode.
        assert result['headers']['Content-Encoding'] == 'br'
        assert result['statusCode'] == HTTPStatus.OK.value
        # base64 of b'some-test-bytes-string'
        assert result['body'] == 'c29tZS10ZXN0LWJ5dGVzLXN0cmluZw=='

    @mock.patch('requests.get')
    def test_proxy_for_gzip_encoded_sites(self, mockget):
        """Gzip responses are re-emitted base64-encoded with the encoding header dropped."""
        headers = {
            "Date": "Sun, 01 Mar 2020 11:22:05 GMT",
            "Expires": "-1",
            "Cache-Control": "private, max-age=0",
            "Content-Type": "text/html; charset=UTF-8",
            "Content-Encoding": "gzip",
            "Content-Length": "65251",
            "X-XSS-Protection": "0",
            "X-Frame-Options": "SAMEORIGIN",
            "Set-Cookie": "1P_JAR=2020-03-01-11; expires=Tue, 31-Mar-2020 11:22:06 GMT; path=/; domain=.google.com; Secure; SameSite=none, NID=199=UsFpcFnQ21COTv9q0Scd3ZUVZBDiHmy0Wts3igOy3v8iHYmlDnv7PbiF_JyecNwwWTUlzjNfp6-b50Igyf0c9CbkirOK9azAy6HWh1TLzCTSUJHbw6_tfZexErwcYNu1F9fXeIOUDJWUrC21DVSJsWg1cCpPrc9d71IbO-9X1dE; expires=Mon, 31-Aug-2020 11:22:05 GMT; path=/; domain=.google.com; Secure; HttpOnly; SameSite=none",
            "Alt-Svc": "quic=\":443\"; ma=2592000; v=\"46,43\",h3-Q050=\":443\"; ma=2592000,h3-Q049=\":443\"; ma=2592000,h3-Q048=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000"
        }
        response = self._mock_response(mockget, headers)
        response.content = b'some-test-bytes-string'

        api_saas_vps_client = API_SaaS_VPS_Client(self._make_event(headers))
        result = api_saas_vps_client.request_get()

        assert result['isBase64Encoded'] == True
        # Gzip is decoded by the proxy, so the header must not be forwarded.
        assert 'Content-Encoding' not in result['headers']
        assert result['statusCode'] == HTTPStatus.OK.value
        assert result['body'] == 'c29tZS10ZXN0LWJ5dGVzLXN0cmluZw=='

    @mock.patch('requests.get')
    def test_proxy_for_unencoded_sites(self, mockget):
        """Plain-text responses are forwarded as-is, not base64-encoded."""
        headers = {
            "Date": "Sun, 01 Mar 2020 11:22:05 GMT",
            "Expires": "-1",
            "Cache-Control": "private, max-age=0",
            "Content-Type": "text/html",
            "Content-Length": "65251",
            "X-XSS-Protection": "0",
            "X-Frame-Options": "SAMEORIGIN",
            "Set-Cookie": "1P_JAR=2020-03-01-11; expires=Tue, 31-Mar-2020 11:22:06 GMT; path=/; domain=.google.com; Secure; SameSite=none, NID=199=UsFpcFnQ21COTv9q0Scd3ZUVZBDiHmy0Wts3igOy3v8iHYmlDnv7PbiF_JyecNwwWTUlzjNfp6-b50Igyf0c9CbkirOK9azAy6HWh1TLzCTSUJHbw6_tfZexErwcYNu1F9fXeIOUDJWUrC21DVSJsWg1cCpPrc9d71IbO-9X1dE; expires=Mon, 31-Aug-2020 11:22:05 GMT; path=/; domain=.google.com; Secure; HttpOnly; SameSite=none",
            "Alt-Svc": "quic=\":443\"; ma=2592000; v=\"46,43\",h3-Q050=\":443\"; ma=2592000,h3-Q049=\":443\"; ma=2592000,h3-Q048=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000"
        }
        response = self._mock_response(mockget, headers)
        response.text = 'some-test-bytes-string'

        api_saas_vps_client = API_SaaS_VPS_Client(self._make_event(headers))
        result = api_saas_vps_client.request_get()

        assert result['isBase64Encoded'] == False
        assert 'Content-Encoding' not in result['headers']
        assert result['statusCode'] == HTTPStatus.OK.value
        assert result['body'] == 'some-test-bytes-string'
from http import HTTPStatus
from gw_proxy._to_sync.anish_agarwal.API_SaaS_VPS_Client import API_SaaS_VPS_Client
class test_Rest_API_SaaS_VPs(TestCase):
@mock.patch('requests.get')
def test_proxy_for_brotli_encoded_sites(self, mockget):
# Mock response
headers = {
"Date" : "Sun, 01 Mar 2020 11:22:05 GMT",
"Expires" : "-1",
"Cache-Control" : "private, max-age=0",
"Content-Type" : "text/html; charset=UTF-8",
"Strict-Transport-Security" : "max-age=31536000",
"Accept-CH" : "DPR",
"Accept-CH-Lifetime" : "2592000",
"P3P" : "CP=\"This is not a P3P policy! See g.co/p3phelp for more info.\"",
"Content-Encoding" : "br",
"Content-Length" : "65251",
"X-XSS-Protection" : "0",
"X-Frame-Options" : "SAMEORIGIN",
"Set-Cookie" : "1P_JAR=2020-03-01-11; expires=Tue, 31-Mar-2020 11:22:06 GMT; path=/; domain=.google.com; Secure; SameSite=none, NID=199=UsFpcFnQ21COTv9q0Scd3ZUVZBDiHmy0Wts3igOy3v8iHYmlDnv7PbiF_JyecNwwWTUlzjNfp6-b50Igyf0c9CbkirOK9azAy6HWh1TLzCTSUJHbw6_tfZexErwcYNu1F9fXeIOUDJWUrC21DVSJsWg1cCpPrc9d71IbO-9X1dE; expires=Mon, 31-Aug-2020 11:22:05 GMT; path=/; domain=.google.com; Secure; HttpOnly; SameSite=none",
"Alt-Svc" : "quic=\":443\"; ma=2592000; v=\"46,43\",h3-Q050=\":443\"; ma=2592000,h3-Q049=\":443\"; ma=2592000,h3-Q048=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000"
}
params = {'headers.return_value': headers}
response = mock.Mock(**params)
response.headers = headers
response.content = b'some-test-bytes-string'
mockget.return_value = response
# The request
body = {}
path = '/'
method = 'GET'
domain_prefix = 'https://google.com'
self.request_headers = {
'accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36',
'accept-encoding' : 'gzip,deflate,br'
}
requestContext = {
"resourcePath" : "/",
"httpMethod" : "GET",
"path" : "/",
"protocol" : "HTTP/1.1",
"domainPrefix" : domain_prefix,
"domainName" : domain_prefix
}
event = {
"body" : body,
"path" : path,
"headers" : headers,
"httpMethod" : method,
"requestContext" : requestContext
}
api_saas_vps_client = API_SaaS_VPS_Client(event)
result = api_saas_vps_client.request_get()
assert result['isBase64Encoded'] == True
assert result['headers']['Content-Encoding'] == 'br'
assert result['statusCode'] == HTTPStatus.OK.value
assert result['body'] == 'c29tZS10ZXN0LWJ5dGVzLXN0cmluZw=='
@mock.patch('requests.get')
def test_proxy_for_gzip_encoded_sites(self, mockget):
# Mock response
headers = {
"Date" : "Sun, 01 Mar 2020 11:22:05 GMT",
"Expires" : "-1",
"Cache-Control" : "private, max-age=0",
"Content-Type" : "text/html; charset=UTF-8",
"Content-Encoding" : "gzip",
"Content-Length" : "65251",
"X-XSS-Protection" : "0",
"X-Frame-Options" : "SAMEORIGIN",
"Set-Cookie" : "1P_JAR=2020-03-01-11; expires=Tue, 31-Mar-2020 11:22:06 GMT; path=/; domain=.google.com; Secure; SameSite=none, NID=199=UsFpcFnQ21COTv9q0Scd3ZUVZBDiHmy0Wts3igOy3v8iHYmlDnv7PbiF_JyecNwwWTUlzjNfp6-b50Igyf0c9CbkirOK9azAy6HWh1TLzCTSUJHbw6_tfZexErwcYNu1F9fXeIOUDJWUrC21DVSJsWg1cCpPrc9d71IbO-9X1dE; expires=Mon, 31-Aug-2020 11:22:05 GMT; path=/; domain=.google.com; Secure; HttpOnly; SameSite=none",
"Alt-Svc" : "quic=\":443\"; ma=2592000; v=\"46,43\",h3-Q050=\":443\"; ma=2592000,h3-Q049=\":443\"; ma=2592000,h3-Q048=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000"
}
params = {'headers.return_value': headers}
response = mock.Mock(**params)
response.headers = headers
response.content = b'some-test-bytes-string'
mockget.return_value = response
# The request
body = {}
path = '/'
method = 'GET'
domain_prefix = 'https://google.com'
self.request_headers = {
'accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36',
'accept-encoding' : 'gzip,deflate,br'
}
requestContext = {
"resourcePath" : "/",
"httpMethod" : "GET",
"path" : "/",
"protocol" : "HTTP/1.1",
"domainPrefix" : domain_prefix,
"domainName" : domain_prefix
}
event = {
"body" : body,
"path" : path,
"headers" : headers,
"httpMethod" : method,
"requestContext" : requestContext
}
api_saas_vps_client = API_SaaS_VPS_Client(event)
result = api_saas_vps_client.request_get()
assert result['isBase64Encoded'] == True
assert 'Content-Encoding' not in result['headers']
assert result['statusCode'] == HTTPStatus.OK.value
assert result['body'] == 'c29tZS10ZXN0LWJ5dGVzLXN0cmluZw=='
@mock.patch('requests.get')
def test_proxy_for_unencoded_sites(self, mockget):
# Mock response
headers = {
"Date" : "Sun, 01 Mar 2020 11:22:05 GMT",
"Expires" : "-1",
"Cache-Control" : "private, max-age=0",
"Content-Type" : "text/html",
"Content-Length" : "65251",
"X-XSS-Protection" : "0",
"X-Frame-Options" : "SAMEORIGIN",
"Set-Cookie" : "1P_JAR=2020-03-01-11; expires=Tue, 31-Mar-2020 11:22:06 GMT; path=/; domain=.google.com; Secure; SameSite=none, NID=199=UsFpcFnQ21COTv9q0Scd3ZUVZBDiHmy0Wts3igOy3v8iHYmlDnv7PbiF_JyecNwwWTUlzjNfp6-b50Igyf0c9CbkirOK9azAy6HWh1TLzCTSUJHbw6_tfZexErwcYNu1F9fXeIOUDJWUrC21DVSJsWg1cCpPrc9d71IbO-9X1dE; expires=Mon, 31-Aug-2020 11:22:05 GMT; path=/; domain=.google.com; Secure; HttpOnly; SameSite=none",
"Alt-Svc" : "quic=\":443\"; ma=2592000; v=\"46,43\",h3-Q050=\":443\"; ma=2592000,h3-Q049=\":443\"; ma=2592000,h3-Q048=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000"
}
params = {'headers.return_value': headers}
response = mock.Mock(**params)
response.headers = headers
response.text = 'some-test-bytes-string'
mockget.return_value = response
# The request
body = {}
path = '/'
method = 'GET'
domain_prefix = 'https://google.com'
self.request_headers = {
'accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36',
'accept-encoding' : 'gzip,deflate,br'
}
requestContext = {
"resourcePath" : "/",
"httpMethod" : "GET",
"path" : "/",
"protocol" : "HTTP/1.1",
"domainPrefix" : domain_prefix,
"domainName" : domain_prefix
}
event = {
"body" : body,
"path" : path,
"headers" : headers,
"httpMethod" : method,
"requestContext" : requestContext
}
api_saas_vps_client = API_SaaS_VPS_Client(event)
result = api_saas_vps_client.request_get()
assert result['isBase64Encoded'] == False
assert 'Content-Encoding' not in result['headers']
assert result['statusCode'] == HTTPStatus.OK.value
assert result['body'] == 'some-test-bytes-string' | 0.437223 | 0.153771 |
import unittest
import sys
import json
from unittest import TestCase
from ovs.lib.tests.mockups import StorageDriverModule, StorageDriverClient
from ovs.plugin.provider.configuration import Configuration
from ovs.extensions.storage.persistentfactory import PersistentFactory
from ovs.extensions.storage.persistent.dummystore import DummyPersistentStore
from ovs.extensions.storage.volatilefactory import VolatileFactory
from ovs.extensions.storage.volatile.dummystore import DummyVolatileStore
class MDSServices(TestCase):
"""
This test class will validate the various scenarios of the delete snapshots logic
"""
VDisk = None
MDSService = None
ServiceType = None
MDSServiceVDisk = None
VPool = None
PMachine = None
Service = None
StorageRouter = None
StorageDriver = None
BackendType = None
VolatileMutex = None
MDSServiceController = None
logLevel = None
@classmethod
def setUpClass(cls):
"""
Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
This makes sure the unittests can be executed without those libraries installed
"""
# Load dummy stores
PersistentFactory.store = DummyPersistentStore()
VolatileFactory.store = DummyVolatileStore()
# Replace mocked classes
sys.modules['ovs.extensions.storageserver.storagedriver'] = StorageDriverModule
# Import required modules/classes after mocking is done
from ovs.dal.hybrids.vdisk import VDisk
from ovs.dal.hybrids.service import Service
from ovs.dal.hybrids.vpool import VPool
from ovs.dal.hybrids.storagerouter import StorageRouter
from ovs.dal.hybrids.pmachine import PMachine
from ovs.dal.hybrids.servicetype import ServiceType
from ovs.dal.hybrids.storagedriver import StorageDriver
from ovs.dal.hybrids.backendtype import BackendType
from ovs.dal.hybrids.j_mdsservice import MDSService
from ovs.dal.hybrids.j_mdsservicevdisk import MDSServiceVDisk
from ovs.extensions.generic.volatilemutex import VolatileMutex
from ovs.lib.mdsservice import MDSServiceController
# Globalize mocked classes
global VDisk
global VPool
global Service
global StorageRouter
global StorageDriver
global BackendType
global PMachine
global MDSService
global ServiceType
global MDSServiceVDisk
global VolatileMutex
global MDSServiceController
_ = VDisk(), VPool(), Service(), MDSService(), MDSServiceVDisk(), ServiceType(), \
StorageRouter(), StorageDriver(), BackendType(), PMachine(), \
VolatileMutex('dummy'), MDSServiceController
# Configuration
def _get(key):
c = PersistentFactory.get_client()
if c.exists(key):
return c.get(key)
return None
def _get_int(key):
return int(Configuration.get(key))
Configuration.get = staticmethod(_get)
Configuration.getInt = staticmethod(_get_int)
# Cleaning storage
VolatileFactory.store.clean()
PersistentFactory.store.clean()
@classmethod
def setUp(cls):
"""
(Re)Sets the stores on every test
"""
PersistentFactory.store = DummyPersistentStore()
PersistentFactory.store.clean()
VolatileFactory.store = DummyVolatileStore()
VolatileFactory.store.clean()
@classmethod
def tearDownClass(cls):
"""
Clean up the unittest
"""
pass
def _generate_nc_function(self, address, mds_service):
"""
Generates the lambda that will return the address or ip
"""
_ = self
if address is True:
return lambda s: mds_service.service.storagerouter.ip
return lambda s: int(mds_service.service.ports[0])
def _generate_bc_function(self, _configs):
"""
Generates the lambda that will return the config list
"""
_ = self
return lambda s: _configs
def _build_service_structure(self, structure):
"""
Builds an MDS service structure
"""
vpools = {}
storagerouters = {}
storagedrivers = {}
services = {}
mds_services = {}
service_type = ServiceType()
service_type.name = 'MetadataServer'
service_type.save()
for vpool_id in structure['vpools']:
vpool = VPool()
vpool.name = str(vpool_id)
vpool.backend_type = BackendType()
vpool.save()
vpools[vpool_id] = vpool
for sr_id in structure['storagerouters']:
storagerouter = StorageRouter()
storagerouter.name = str(sr_id)
storagerouter.ip = '10.0.0.{0}'.format(sr_id)
storagerouter.pmachine = PMachine()
storagerouter.save()
storagerouters[sr_id] = storagerouter
for sd_info in structure['storagedrivers']:
sd_id, vpool_id, sr_id = sd_info
storagedriver = StorageDriver()
storagedriver.vpool = vpools[vpool_id]
storagedriver.storagerouter = storagerouters[sr_id]
storagedriver.name = str(sd_id)
storagedriver.mountpoint_temp = '/'
storagedriver.mountpoint_foc = '/'
storagedriver.mountpoint_readcache2 = '/'
storagedriver.mountpoint_writecache = '/'
storagedriver.mountpoint_readcache1 = '/'
storagedriver.mountpoint_temp = '/'
storagedriver.mountpoint_md = '/'
storagedriver.mountpoint_bfs = '/'
storagedriver.mountpoint = '/'
storagedriver.cluster_ip = storagerouters[sr_id].ip
storagedriver.storage_ip = '127.0.0.1'
storagedriver.storagedriver_id = str(sd_id)
storagedriver.ports = [1, 2, 3]
storagedriver.save()
storagedrivers[sd_id] = storagedriver
for mds_info in structure['mds_services']:
mds_id, sd_id = mds_info
sd = storagedrivers[sd_id]
s_id = '{0}-{1}'.format(sd.storagerouter.name, mds_id)
service = Service()
service.name = s_id
service.storagerouter = sd.storagerouter
service.ports = [mds_id]
service.type = service_type
service.save()
services[s_id] = service
mds_service = MDSService()
mds_service.service = service
mds_service.number = 0
mds_service.capacity = 10
mds_service.vpool = sd.vpool
mds_service.save()
mds_services[mds_id] = mds_service
return vpools, storagerouters, storagedrivers, services, mds_services, service_type
def _create_vdisks_for_mds_service(self, amount, start_id, mds_service=None, vpool=None):
"""
Generates vdisks and appends them to a given mds_service
"""
vdisks = {}
for i in xrange(start_id, start_id + amount):
disk = VDisk()
disk.name = str(i)
disk.devicename = 'disk_{0}'.format(i)
disk.volume_id = 'disk_{0}'.format(i)
disk.vpool = mds_service.vpool if mds_service is not None else vpool
disk.size = 0
disk.save()
disk.reload_client()
if mds_service is not None:
storagedriver_id = None
for sd in mds_service.vpool.storagedrivers:
if sd.storagerouter_guid == mds_service.service.storagerouter_guid:
storagedriver_id = sd.storagedriver_id
junction = MDSServiceVDisk()
junction.vdisk = disk
junction.mds_service = mds_service
junction.is_master = True
junction.save()
config = type('MDSNodeConfig', (),
{'address': self._generate_nc_function(True, mds_service),
'port': self._generate_nc_function(False, mds_service)})()
mds_backend_config = type('MDSMetaDataBackendConfig', (),
{'node_configs': self._generate_bc_function([config])})()
StorageDriverClient.metadata_backend_config['disk_{0}'.format(i)] = mds_backend_config
StorageDriverClient.catch_up['disk_{0}'.format(i)] = 50
StorageDriverClient.vrouter_id['disk_{0}'.format(i)] = storagedriver_id
vdisks[i] = disk
return vdisks
def test_load_calculation(self):
"""
Validates whether the load calculation works
"""
vpools, storagerouters, storagedrivers, services, mds_services, _ = self._build_service_structure(
{'vpools': [1],
'storagerouters': [1],
'storagedrivers': [(1, 1, 1)], # (<id>, <vpool_id>, <sr_id>)
'mds_services': [(1, 1)]} # (<id>, <sd_id>)
)
mds_service = mds_services[1]
self._create_vdisks_for_mds_service(2, 1, mds_service=mds_service)
load, load_plus = MDSServiceController.get_mds_load(mds_service)
self.assertEqual(load, 20, 'There should be a 20% load. {0}'.format(load))
self.assertEqual(load_plus, 30, 'There should be a 30% plus load. {0}'.format(load_plus))
self._create_vdisks_for_mds_service(3, 3, mds_service=mds_service)
load, load_plus = MDSServiceController.get_mds_load(mds_service)
self.assertEqual(load, 50, 'There should be a 50% load. {0}'.format(load))
self.assertEqual(load_plus, 60, 'There should be a 60% plus load. {0}'.format(load_plus))
self._create_vdisks_for_mds_service(5, 6, mds_service=mds_service)
load, load_plus = MDSServiceController.get_mds_load(mds_service)
self.assertEqual(load, 100, 'There should be a 100% load. {0}'.format(load))
self.assertEqual(load_plus, 110, 'There should be a 110% plus load. {0}'.format(load_plus))
mds_service.capacity = -1
mds_service.save()
load, load_plus = MDSServiceController.get_mds_load(mds_service)
self.assertEqual(load, 50, 'There should be a 50% load. {0}'.format(load))
self.assertEqual(load_plus, 50, 'There should be a 50% plus load. {0}'.format(load_plus))
mds_service.capacity = 0
mds_service.save()
load, load_plus = MDSServiceController.get_mds_load(mds_service)
self.assertEqual(load, float('inf'), 'There should be infinite load. {0}'.format(load))
self.assertEqual(load_plus, float('inf'), 'There should be infinite plus load. {0}'.format(load_plus))
def test_storagedriver_config_set(self):
"""
Validates whether storagedriver configuration is generated as expected
"""
PersistentFactory.get_client().set('ovs.storagedriver.mds.safety', '3')
vpools, storagerouters, storagedrivers, services, mds_services, _ = self._build_service_structure(
{'vpools': [1, 2],
'storagerouters': [1, 2, 3, 4, 5, 6],
'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 2, 4), (6, 2, 5), (7, 2, 6)], # (<id>, <vpool_id>, <sr_id>)
'mds_services': [(1, 1), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 7)]} # (<id>, <sd_id>)
)
vdisks = {}
start_id = 1
for mds_service in mds_services.itervalues():
vdisks.update(self._create_vdisks_for_mds_service(10, start_id, mds_service=mds_service))
start_id += 10
mds_services[1].capacity = 11 # on 1, vpool 1
mds_services[1].save()
mds_services[2].capacity = 20 # on 1, vpool 1
mds_services[2].save()
mds_services[3].capacity = 12 # on 2, vpool 1
mds_services[3].save()
mds_services[4].capacity = 14 # on 3, vpool 1
mds_services[4].save()
mds_services[5].capacity = 16 # on 4, vpool 1
mds_services[5].save()
mds_services[6].capacity = 11 # on 4, vpool 2
mds_services[6].save()
mds_services[7].capacity = 13 # on 5, vpool 2
mds_services[7].save()
mds_services[8].capacity = 19 # on 6, vpool 2
mds_services[8].save()
mds_services[9].capacity = 15 # on 6, vpool 2
mds_services[9].save()
config = MDSServiceController.get_mds_storagedriver_config_set(vpools[1])
expected = {storagerouters[1].guid: [{'host': '10.0.0.1', 'port': 2},
{'host': '10.0.0.4', 'port': 5},
{'host': '10.0.0.3', 'port': 4}],
storagerouters[2].guid: [{'host': '10.0.0.2', 'port': 3},
{'host': '10.0.0.1', 'port': 2},
{'host': '10.0.0.4', 'port': 5}],
storagerouters[3].guid: [{'host': '10.0.0.3', 'port': 4},
{'host': '10.0.0.1', 'port': 2},
{'host': '10.0.0.4', 'port': 5}],
storagerouters[4].guid: [{'host': '10.0.0.4', 'port': 5},
{'host': '10.0.0.1', 'port': 2},
{'host': '10.0.0.3', 'port': 4}]}
self.assertDictEqual(config, expected, 'Test 1. Got:\n{0}'.format(json.dumps(config, indent=2)))
mds_services[2].capacity = 10 # on 1, vpool 1
mds_services[2].save()
config = MDSServiceController.get_mds_storagedriver_config_set(vpools[1])
expected = {storagerouters[1].guid: [{'host': '10.0.0.1', 'port': 1},
{'host': '10.0.0.4', 'port': 5},
{'host': '10.0.0.3', 'port': 4}],
storagerouters[2].guid: [{'host': '10.0.0.2', 'port': 3},
{'host': '10.0.0.4', 'port': 5},
{'host': '10.0.0.3', 'port': 4}],
storagerouters[3].guid: [{'host': '10.0.0.3', 'port': 4},
{'host': '10.0.0.4', 'port': 5},
{'host': '10.0.0.2', 'port': 3}],
storagerouters[4].guid: [{'host': '10.0.0.4', 'port': 5},
{'host': '10.0.0.3', 'port': 4},
{'host': '10.0.0.2', 'port': 3}]}
self.assertDictEqual(config, expected, 'Test 2. Got:\n{0}'.format(json.dumps(config, indent=2)))
def test_syncreality(self):
"""
Validates whether reality is synced to the model as expected
"""
def _generate_backend_config(_scenario, _vdisks, _mds_services):
"""
Injects a backend config that would be returned by the storagedriver
"""
for disk_id in _scenario:
configs = []
for mds_id in _scenario[disk_id]:
config = type('MDSNodeConfig', (), {'address': self._generate_nc_function(True, _mds_services[mds_id]),
'port': self._generate_nc_function(False, _mds_services[mds_id])})()
configs.append(config)
mds_backend_config = type('MDSMetaDataBackendConfig', (), {'node_configs': self._generate_bc_function(configs)})()
StorageDriverClient.metadata_backend_config[_vdisks[disk_id].volume_id] = mds_backend_config
def _validate_scenario(_scenario, _vdisks, _mds_services):
"""
Validates a scenario with the model
"""
for disk_id in _scenario:
expected_mds_services = []
for mds_id in _scenario[disk_id]:
expected_mds_services.append(_mds_services[mds_id])
disk = _vdisks[disk_id]
self.assertEqual(len(disk.mds_services), len(expected_mds_services))
for junction in disk.mds_services:
self.assertIn(junction.mds_service, expected_mds_services)
def _test_scenario(scenario, _vdisks, _mds_services):
"""
Executes a testrun for a given scenario
"""
_generate_backend_config(scenario, _vdisks, _mds_services)
for vdisk_id in _vdisks:
MDSServiceController.sync_vdisk_to_reality(_vdisks[vdisk_id])
_validate_scenario(scenario, _vdisks, _mds_services)
vpools, _, _, _, mds_services, _ = self._build_service_structure(
{'vpools': [1],
'storagerouters': [1, 2, 3, 4],
'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4)], # (<id>, <vpool_id>, <sr_id>)
'mds_services': [(1, 1), (2, 1), (3, 2), (4, 3), (5, 4)]} # (<id>, <sd_id>)
)
vdisks = self._create_vdisks_for_mds_service(5, 1, vpool=vpools[1])
_test_scenario({1: [1, 3, 4], 2: [1, 2], 3: [1, 3, 4], 4: [3, 4, 5], 5: [1, 4, 5]},
vdisks, mds_services)
_test_scenario({1: [1, 2], 2: [1, 2, 3, 4, 5], 3: [1, 2], 4: [5], 5: [1, 4, 5]},
vdisks, mds_services)
def test_ensure_safety(self):
"""
Validates whether the ensure_safety call works as expected
"""
def _generate_mds_service_load_repr(_mds_service):
"""
Generates a load representing thing for a given mds_service
"""
masters, slaves = 0, 0
for _junction in _mds_service.vdisks:
if _junction.is_master:
masters += 1
else:
slaves += 1
capacity = _mds_service.capacity
if capacity == -1:
capacity = 'infinite'
_load, _ = MDSServiceController.get_mds_load(_mds_service)
if _load == float('inf'):
_load = 'infinite'
else:
_load = round(_load, 2)
return [_mds_service.service.storagerouter.ip, _mds_service.service.ports[0], masters, slaves, capacity, _load]
def _check_reality(_configs, _loads, _vdisks, _mds_services, test=True, display=False):
"""
Validates 'reality' with an expected config/load
"""
reality_configs = []
for _vdisk_id in _vdisks:
reality_configs.append(_vdisks[_vdisk_id].info['metadata_backend_config'])
if display is True:
for c in reality_configs:
print c
if test is True:
self.assertListEqual(reality_configs, _configs)
reality_loads = []
for mds_id in _mds_services:
reality_loads.append(_generate_mds_service_load_repr(_mds_services[mds_id]))
if display is True:
for l in reality_loads:
print l
if test is True:
self.assertListEqual(reality_loads, _loads)
PersistentFactory.get_client().set('ovs.storagedriver.mds.safety', '3')
PersistentFactory.get_client().set('ovs.storagedriver.mds.maxload', '75')
PersistentFactory.get_client().set('ovs.storagedriver.mds.tlogs', '100')
vpools, storagerouters, storagedrivers, _, mds_services, service_type = self._build_service_structure(
{'vpools': [1],
'storagerouters': [1, 2, 3, 4],
'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4)], # (<id>, <vpool_id>, <sr_id>)
'mds_services': [(1, 1), (2, 2), (3, 3), (4, 4)]} # (<id>, <sd_id>)
)
vdisks = {}
start_id = 1
for mds_service in mds_services.itervalues():
vdisks.update(self._create_vdisks_for_mds_service(2, start_id, mds_service=mds_service))
start_id += 2
# Validate the start configuration which is simple, each disk has only its default local master
configs = [[{'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.2', 'port': 2}],
[{'ip': '10.0.0.2', 'port': 2}],
[{'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.4', 'port': 4}]]
loads = [['10.0.0.1', 1, 2, 0, 10, 20.0],
['10.0.0.2', 2, 2, 0, 10, 20.0],
['10.0.0.3', 3, 2, 0, 10, 20.0],
['10.0.0.4', 4, 2, 0, 10, 20.0]]
_check_reality(configs, loads, vdisks, mds_services)
# Validate first run. Each disk should now have sufficient nodes, since there are plenty of MDS services available
configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.2', 'port': 2}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.2', 'port': 2}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.2', 'port': 2}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
['10.0.0.2', 2, 2, 4, 10, 60.0],
['10.0.0.3', 3, 2, 4, 10, 60.0],
['10.0.0.4', 4, 2, 3, 10, 50.0]]
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# Validate whether this extra (unnessecairy) run doesn't change anything, preventing reconfiguring over and
# over again
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# Validating whether an overloaded node will cause correct rebalancing
mds_services[2].capacity = 2
mds_services[2].save()
configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
['10.0.0.2', 2, 2, 0, 2, 100.0],
['10.0.0.3', 3, 2, 5, 10, 70.0],
['10.0.0.4', 4, 2, 5, 10, 70.0]]
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# Validate whether the overloaded services are still handled. In this case, causing a reoder of the slaves as
# ordered in the model
configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
['10.0.0.2', 2, 2, 0, 2, 100.0],
['10.0.0.3', 3, 2, 5, 10, 70.0],
['10.0.0.4', 4, 2, 5, 10, 70.0]]
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# Again, validating whether a subsequent run doesn't give unexpected changes
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# A MDS service will be added (next to the overloaded service), this should cause the expected rebalancing
s_id = '{0}-5'.format(storagerouters[2].name)
service = Service()
service.name = s_id
service.storagerouter = storagerouters[2]
service.ports = [5]
service.type = service_type
service.save()
mds_service = MDSService()
mds_service.service = service
mds_service.number = 0
mds_service.capacity = 10
mds_service.vpool = vpools[1]
mds_service.save()
mds_services[5] = mds_service
configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.2', 'port': 5}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.2', 'port': 5}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.2', 'port': 5}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
['10.0.0.2', 2, 2, 0, 2, 100.0],
['10.0.0.3', 3, 2, 5, 10, 70.0],
['10.0.0.4', 4, 2, 5, 10, 70.0],
['10.0.0.2', 5, 0, 3, 10, 30.0]]
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# If the tlogs are not catched up, nothing should be changed
for vdisk_id in [3, 4]:
StorageDriverClient.catch_up[vdisks[vdisk_id].volume_id] = 1000
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# The next run, after tlogs are catched up, a master switch should be executed
for vdisk_id in [3, 4]:
StorageDriverClient.catch_up[vdisks[vdisk_id].volume_id] = 50
configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 5}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.2', 'port': 5}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
['10.0.0.2', 2, 1, 0, 2, 50.0],
['10.0.0.3', 3, 2, 5, 10, 70.0],
['10.0.0.4', 4, 2, 5, 10, 70.0],
['10.0.0.2', 5, 1, 1, 10, 20.0]]
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# Validate whether a volume migration makes the master follow
StorageDriverClient.vrouter_id[vdisks[1].volume_id] = storagedrivers[3].storagedriver_id
configs = [[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.2', 'port': 5}],
[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 5}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.2', 'port': 5}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
loads = [['10.0.0.1', 1, 1, 6, 10, 70.0],
['10.0.0.2', 2, 1, 0, 2, 50.0],
['10.0.0.3', 3, 3, 4, 10, 70.0],
['10.0.0.4', 4, 2, 4, 10, 60.0],
['10.0.0.2', 5, 1, 2, 10, 30.0]]
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# Validates if a second run doesn't change anything
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(MDSServices)
unittest.TextTestRunner(verbosity=2).run(suite) | ovs/lib/tests/test_mdsservice.py | import unittest
import sys
import json
from unittest import TestCase
from ovs.lib.tests.mockups import StorageDriverModule, StorageDriverClient
from ovs.plugin.provider.configuration import Configuration
from ovs.extensions.storage.persistentfactory import PersistentFactory
from ovs.extensions.storage.persistent.dummystore import DummyPersistentStore
from ovs.extensions.storage.volatilefactory import VolatileFactory
from ovs.extensions.storage.volatile.dummystore import DummyVolatileStore
class MDSServices(TestCase):
"""
This test class will validate the various scenarios of the delete snapshots logic
"""
VDisk = None
MDSService = None
ServiceType = None
MDSServiceVDisk = None
VPool = None
PMachine = None
Service = None
StorageRouter = None
StorageDriver = None
BackendType = None
VolatileMutex = None
MDSServiceController = None
logLevel = None
@classmethod
def setUpClass(cls):
"""
Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
This makes sure the unittests can be executed without those libraries installed
"""
# Load dummy stores
PersistentFactory.store = DummyPersistentStore()
VolatileFactory.store = DummyVolatileStore()
# Replace mocked classes
sys.modules['ovs.extensions.storageserver.storagedriver'] = StorageDriverModule
# Import required modules/classes after mocking is done
from ovs.dal.hybrids.vdisk import VDisk
from ovs.dal.hybrids.service import Service
from ovs.dal.hybrids.vpool import VPool
from ovs.dal.hybrids.storagerouter import StorageRouter
from ovs.dal.hybrids.pmachine import PMachine
from ovs.dal.hybrids.servicetype import ServiceType
from ovs.dal.hybrids.storagedriver import StorageDriver
from ovs.dal.hybrids.backendtype import BackendType
from ovs.dal.hybrids.j_mdsservice import MDSService
from ovs.dal.hybrids.j_mdsservicevdisk import MDSServiceVDisk
from ovs.extensions.generic.volatilemutex import VolatileMutex
from ovs.lib.mdsservice import MDSServiceController
# Globalize mocked classes
global VDisk
global VPool
global Service
global StorageRouter
global StorageDriver
global BackendType
global PMachine
global MDSService
global ServiceType
global MDSServiceVDisk
global VolatileMutex
global MDSServiceController
_ = VDisk(), VPool(), Service(), MDSService(), MDSServiceVDisk(), ServiceType(), \
StorageRouter(), StorageDriver(), BackendType(), PMachine(), \
VolatileMutex('dummy'), MDSServiceController
# Configuration
def _get(key):
c = PersistentFactory.get_client()
if c.exists(key):
return c.get(key)
return None
def _get_int(key):
return int(Configuration.get(key))
Configuration.get = staticmethod(_get)
Configuration.getInt = staticmethod(_get_int)
# Cleaning storage
VolatileFactory.store.clean()
PersistentFactory.store.clean()
@classmethod
def setUp(cls):
"""
(Re)Sets the stores on every test
"""
PersistentFactory.store = DummyPersistentStore()
PersistentFactory.store.clean()
VolatileFactory.store = DummyVolatileStore()
VolatileFactory.store.clean()
@classmethod
def tearDownClass(cls):
"""
Clean up the unittest
"""
pass
def _generate_nc_function(self, address, mds_service):
"""
Generates the lambda that will return the address or ip
"""
_ = self
if address is True:
return lambda s: mds_service.service.storagerouter.ip
return lambda s: int(mds_service.service.ports[0])
def _generate_bc_function(self, _configs):
"""
Generates the lambda that will return the config list
"""
_ = self
return lambda s: _configs
def _build_service_structure(self, structure):
"""
Builds an MDS service structure
"""
vpools = {}
storagerouters = {}
storagedrivers = {}
services = {}
mds_services = {}
service_type = ServiceType()
service_type.name = 'MetadataServer'
service_type.save()
for vpool_id in structure['vpools']:
vpool = VPool()
vpool.name = str(vpool_id)
vpool.backend_type = BackendType()
vpool.save()
vpools[vpool_id] = vpool
for sr_id in structure['storagerouters']:
storagerouter = StorageRouter()
storagerouter.name = str(sr_id)
storagerouter.ip = '10.0.0.{0}'.format(sr_id)
storagerouter.pmachine = PMachine()
storagerouter.save()
storagerouters[sr_id] = storagerouter
for sd_info in structure['storagedrivers']:
sd_id, vpool_id, sr_id = sd_info
storagedriver = StorageDriver()
storagedriver.vpool = vpools[vpool_id]
storagedriver.storagerouter = storagerouters[sr_id]
storagedriver.name = str(sd_id)
storagedriver.mountpoint_temp = '/'
storagedriver.mountpoint_foc = '/'
storagedriver.mountpoint_readcache2 = '/'
storagedriver.mountpoint_writecache = '/'
storagedriver.mountpoint_readcache1 = '/'
storagedriver.mountpoint_temp = '/'
storagedriver.mountpoint_md = '/'
storagedriver.mountpoint_bfs = '/'
storagedriver.mountpoint = '/'
storagedriver.cluster_ip = storagerouters[sr_id].ip
storagedriver.storage_ip = '127.0.0.1'
storagedriver.storagedriver_id = str(sd_id)
storagedriver.ports = [1, 2, 3]
storagedriver.save()
storagedrivers[sd_id] = storagedriver
for mds_info in structure['mds_services']:
mds_id, sd_id = mds_info
sd = storagedrivers[sd_id]
s_id = '{0}-{1}'.format(sd.storagerouter.name, mds_id)
service = Service()
service.name = s_id
service.storagerouter = sd.storagerouter
service.ports = [mds_id]
service.type = service_type
service.save()
services[s_id] = service
mds_service = MDSService()
mds_service.service = service
mds_service.number = 0
mds_service.capacity = 10
mds_service.vpool = sd.vpool
mds_service.save()
mds_services[mds_id] = mds_service
return vpools, storagerouters, storagedrivers, services, mds_services, service_type
def _create_vdisks_for_mds_service(self, amount, start_id, mds_service=None, vpool=None):
"""
Generates vdisks and appends them to a given mds_service
"""
vdisks = {}
for i in xrange(start_id, start_id + amount):
disk = VDisk()
disk.name = str(i)
disk.devicename = 'disk_{0}'.format(i)
disk.volume_id = 'disk_{0}'.format(i)
disk.vpool = mds_service.vpool if mds_service is not None else vpool
disk.size = 0
disk.save()
disk.reload_client()
if mds_service is not None:
storagedriver_id = None
for sd in mds_service.vpool.storagedrivers:
if sd.storagerouter_guid == mds_service.service.storagerouter_guid:
storagedriver_id = sd.storagedriver_id
junction = MDSServiceVDisk()
junction.vdisk = disk
junction.mds_service = mds_service
junction.is_master = True
junction.save()
config = type('MDSNodeConfig', (),
{'address': self._generate_nc_function(True, mds_service),
'port': self._generate_nc_function(False, mds_service)})()
mds_backend_config = type('MDSMetaDataBackendConfig', (),
{'node_configs': self._generate_bc_function([config])})()
StorageDriverClient.metadata_backend_config['disk_{0}'.format(i)] = mds_backend_config
StorageDriverClient.catch_up['disk_{0}'.format(i)] = 50
StorageDriverClient.vrouter_id['disk_{0}'.format(i)] = storagedriver_id
vdisks[i] = disk
return vdisks
def test_load_calculation(self):
"""
Validates whether the load calculation works
"""
vpools, storagerouters, storagedrivers, services, mds_services, _ = self._build_service_structure(
{'vpools': [1],
'storagerouters': [1],
'storagedrivers': [(1, 1, 1)], # (<id>, <vpool_id>, <sr_id>)
'mds_services': [(1, 1)]} # (<id>, <sd_id>)
)
mds_service = mds_services[1]
self._create_vdisks_for_mds_service(2, 1, mds_service=mds_service)
load, load_plus = MDSServiceController.get_mds_load(mds_service)
self.assertEqual(load, 20, 'There should be a 20% load. {0}'.format(load))
self.assertEqual(load_plus, 30, 'There should be a 30% plus load. {0}'.format(load_plus))
self._create_vdisks_for_mds_service(3, 3, mds_service=mds_service)
load, load_plus = MDSServiceController.get_mds_load(mds_service)
self.assertEqual(load, 50, 'There should be a 50% load. {0}'.format(load))
self.assertEqual(load_plus, 60, 'There should be a 60% plus load. {0}'.format(load_plus))
self._create_vdisks_for_mds_service(5, 6, mds_service=mds_service)
load, load_plus = MDSServiceController.get_mds_load(mds_service)
self.assertEqual(load, 100, 'There should be a 100% load. {0}'.format(load))
self.assertEqual(load_plus, 110, 'There should be a 110% plus load. {0}'.format(load_plus))
mds_service.capacity = -1
mds_service.save()
load, load_plus = MDSServiceController.get_mds_load(mds_service)
self.assertEqual(load, 50, 'There should be a 50% load. {0}'.format(load))
self.assertEqual(load_plus, 50, 'There should be a 50% plus load. {0}'.format(load_plus))
mds_service.capacity = 0
mds_service.save()
load, load_plus = MDSServiceController.get_mds_load(mds_service)
self.assertEqual(load, float('inf'), 'There should be infinite load. {0}'.format(load))
self.assertEqual(load_plus, float('inf'), 'There should be infinite plus load. {0}'.format(load_plus))
def test_storagedriver_config_set(self):
"""
Validates whether storagedriver configuration is generated as expected
"""
PersistentFactory.get_client().set('ovs.storagedriver.mds.safety', '3')
vpools, storagerouters, storagedrivers, services, mds_services, _ = self._build_service_structure(
{'vpools': [1, 2],
'storagerouters': [1, 2, 3, 4, 5, 6],
'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 2, 4), (6, 2, 5), (7, 2, 6)], # (<id>, <vpool_id>, <sr_id>)
'mds_services': [(1, 1), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 7)]} # (<id>, <sd_id>)
)
vdisks = {}
start_id = 1
for mds_service in mds_services.itervalues():
vdisks.update(self._create_vdisks_for_mds_service(10, start_id, mds_service=mds_service))
start_id += 10
mds_services[1].capacity = 11 # on 1, vpool 1
mds_services[1].save()
mds_services[2].capacity = 20 # on 1, vpool 1
mds_services[2].save()
mds_services[3].capacity = 12 # on 2, vpool 1
mds_services[3].save()
mds_services[4].capacity = 14 # on 3, vpool 1
mds_services[4].save()
mds_services[5].capacity = 16 # on 4, vpool 1
mds_services[5].save()
mds_services[6].capacity = 11 # on 4, vpool 2
mds_services[6].save()
mds_services[7].capacity = 13 # on 5, vpool 2
mds_services[7].save()
mds_services[8].capacity = 19 # on 6, vpool 2
mds_services[8].save()
mds_services[9].capacity = 15 # on 6, vpool 2
mds_services[9].save()
config = MDSServiceController.get_mds_storagedriver_config_set(vpools[1])
expected = {storagerouters[1].guid: [{'host': '10.0.0.1', 'port': 2},
{'host': '10.0.0.4', 'port': 5},
{'host': '10.0.0.3', 'port': 4}],
storagerouters[2].guid: [{'host': '10.0.0.2', 'port': 3},
{'host': '10.0.0.1', 'port': 2},
{'host': '10.0.0.4', 'port': 5}],
storagerouters[3].guid: [{'host': '10.0.0.3', 'port': 4},
{'host': '10.0.0.1', 'port': 2},
{'host': '10.0.0.4', 'port': 5}],
storagerouters[4].guid: [{'host': '10.0.0.4', 'port': 5},
{'host': '10.0.0.1', 'port': 2},
{'host': '10.0.0.3', 'port': 4}]}
self.assertDictEqual(config, expected, 'Test 1. Got:\n{0}'.format(json.dumps(config, indent=2)))
mds_services[2].capacity = 10 # on 1, vpool 1
mds_services[2].save()
config = MDSServiceController.get_mds_storagedriver_config_set(vpools[1])
expected = {storagerouters[1].guid: [{'host': '10.0.0.1', 'port': 1},
{'host': '10.0.0.4', 'port': 5},
{'host': '10.0.0.3', 'port': 4}],
storagerouters[2].guid: [{'host': '10.0.0.2', 'port': 3},
{'host': '10.0.0.4', 'port': 5},
{'host': '10.0.0.3', 'port': 4}],
storagerouters[3].guid: [{'host': '10.0.0.3', 'port': 4},
{'host': '10.0.0.4', 'port': 5},
{'host': '10.0.0.2', 'port': 3}],
storagerouters[4].guid: [{'host': '10.0.0.4', 'port': 5},
{'host': '10.0.0.3', 'port': 4},
{'host': '10.0.0.2', 'port': 3}]}
self.assertDictEqual(config, expected, 'Test 2. Got:\n{0}'.format(json.dumps(config, indent=2)))
def test_syncreality(self):
"""
Validates whether reality is synced to the model as expected
"""
def _generate_backend_config(_scenario, _vdisks, _mds_services):
"""
Injects a backend config that would be returned by the storagedriver
"""
for disk_id in _scenario:
configs = []
for mds_id in _scenario[disk_id]:
config = type('MDSNodeConfig', (), {'address': self._generate_nc_function(True, _mds_services[mds_id]),
'port': self._generate_nc_function(False, _mds_services[mds_id])})()
configs.append(config)
mds_backend_config = type('MDSMetaDataBackendConfig', (), {'node_configs': self._generate_bc_function(configs)})()
StorageDriverClient.metadata_backend_config[_vdisks[disk_id].volume_id] = mds_backend_config
def _validate_scenario(_scenario, _vdisks, _mds_services):
"""
Validates a scenario with the model
"""
for disk_id in _scenario:
expected_mds_services = []
for mds_id in _scenario[disk_id]:
expected_mds_services.append(_mds_services[mds_id])
disk = _vdisks[disk_id]
self.assertEqual(len(disk.mds_services), len(expected_mds_services))
for junction in disk.mds_services:
self.assertIn(junction.mds_service, expected_mds_services)
def _test_scenario(scenario, _vdisks, _mds_services):
"""
Executes a testrun for a given scenario
"""
_generate_backend_config(scenario, _vdisks, _mds_services)
for vdisk_id in _vdisks:
MDSServiceController.sync_vdisk_to_reality(_vdisks[vdisk_id])
_validate_scenario(scenario, _vdisks, _mds_services)
vpools, _, _, _, mds_services, _ = self._build_service_structure(
{'vpools': [1],
'storagerouters': [1, 2, 3, 4],
'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4)], # (<id>, <vpool_id>, <sr_id>)
'mds_services': [(1, 1), (2, 1), (3, 2), (4, 3), (5, 4)]} # (<id>, <sd_id>)
)
vdisks = self._create_vdisks_for_mds_service(5, 1, vpool=vpools[1])
_test_scenario({1: [1, 3, 4], 2: [1, 2], 3: [1, 3, 4], 4: [3, 4, 5], 5: [1, 4, 5]},
vdisks, mds_services)
_test_scenario({1: [1, 2], 2: [1, 2, 3, 4, 5], 3: [1, 2], 4: [5], 5: [1, 4, 5]},
vdisks, mds_services)
def test_ensure_safety(self):
"""
Validates whether the ensure_safety call works as expected
"""
def _generate_mds_service_load_repr(_mds_service):
"""
Generates a load representing thing for a given mds_service
"""
masters, slaves = 0, 0
for _junction in _mds_service.vdisks:
if _junction.is_master:
masters += 1
else:
slaves += 1
capacity = _mds_service.capacity
if capacity == -1:
capacity = 'infinite'
_load, _ = MDSServiceController.get_mds_load(_mds_service)
if _load == float('inf'):
_load = 'infinite'
else:
_load = round(_load, 2)
return [_mds_service.service.storagerouter.ip, _mds_service.service.ports[0], masters, slaves, capacity, _load]
def _check_reality(_configs, _loads, _vdisks, _mds_services, test=True, display=False):
"""
Validates 'reality' with an expected config/load
"""
reality_configs = []
for _vdisk_id in _vdisks:
reality_configs.append(_vdisks[_vdisk_id].info['metadata_backend_config'])
if display is True:
for c in reality_configs:
print c
if test is True:
self.assertListEqual(reality_configs, _configs)
reality_loads = []
for mds_id in _mds_services:
reality_loads.append(_generate_mds_service_load_repr(_mds_services[mds_id]))
if display is True:
for l in reality_loads:
print l
if test is True:
self.assertListEqual(reality_loads, _loads)
PersistentFactory.get_client().set('ovs.storagedriver.mds.safety', '3')
PersistentFactory.get_client().set('ovs.storagedriver.mds.maxload', '75')
PersistentFactory.get_client().set('ovs.storagedriver.mds.tlogs', '100')
vpools, storagerouters, storagedrivers, _, mds_services, service_type = self._build_service_structure(
{'vpools': [1],
'storagerouters': [1, 2, 3, 4],
'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4)], # (<id>, <vpool_id>, <sr_id>)
'mds_services': [(1, 1), (2, 2), (3, 3), (4, 4)]} # (<id>, <sd_id>)
)
vdisks = {}
start_id = 1
for mds_service in mds_services.itervalues():
vdisks.update(self._create_vdisks_for_mds_service(2, start_id, mds_service=mds_service))
start_id += 2
# Validate the start configuration which is simple, each disk has only its default local master
configs = [[{'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.2', 'port': 2}],
[{'ip': '10.0.0.2', 'port': 2}],
[{'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.4', 'port': 4}]]
loads = [['10.0.0.1', 1, 2, 0, 10, 20.0],
['10.0.0.2', 2, 2, 0, 10, 20.0],
['10.0.0.3', 3, 2, 0, 10, 20.0],
['10.0.0.4', 4, 2, 0, 10, 20.0]]
_check_reality(configs, loads, vdisks, mds_services)
# Validate first run. Each disk should now have sufficient nodes, since there are plenty of MDS services available
configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.2', 'port': 2}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.2', 'port': 2}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.2', 'port': 2}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
['10.0.0.2', 2, 2, 4, 10, 60.0],
['10.0.0.3', 3, 2, 4, 10, 60.0],
['10.0.0.4', 4, 2, 3, 10, 50.0]]
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# Validate whether this extra (unnessecairy) run doesn't change anything, preventing reconfiguring over and
# over again
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# Validating whether an overloaded node will cause correct rebalancing
mds_services[2].capacity = 2
mds_services[2].save()
configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
['10.0.0.2', 2, 2, 0, 2, 100.0],
['10.0.0.3', 3, 2, 5, 10, 70.0],
['10.0.0.4', 4, 2, 5, 10, 70.0]]
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# Validate whether the overloaded services are still handled. In this case, causing a reoder of the slaves as
# ordered in the model
configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
['10.0.0.2', 2, 2, 0, 2, 100.0],
['10.0.0.3', 3, 2, 5, 10, 70.0],
['10.0.0.4', 4, 2, 5, 10, 70.0]]
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# Again, validating whether a subsequent run doesn't give unexpected changes
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# A MDS service will be added (next to the overloaded service), this should cause the expected rebalancing
s_id = '{0}-5'.format(storagerouters[2].name)
service = Service()
service.name = s_id
service.storagerouter = storagerouters[2]
service.ports = [5]
service.type = service_type
service.save()
mds_service = MDSService()
mds_service.service = service
mds_service.number = 0
mds_service.capacity = 10
mds_service.vpool = vpools[1]
mds_service.save()
mds_services[5] = mds_service
configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.2', 'port': 5}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.2', 'port': 5}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.2', 'port': 5}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
['10.0.0.2', 2, 2, 0, 2, 100.0],
['10.0.0.3', 3, 2, 5, 10, 70.0],
['10.0.0.4', 4, 2, 5, 10, 70.0],
['10.0.0.2', 5, 0, 3, 10, 30.0]]
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# If the tlogs are not catched up, nothing should be changed
for vdisk_id in [3, 4]:
StorageDriverClient.catch_up[vdisks[vdisk_id].volume_id] = 1000
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# The next run, after tlogs are catched up, a master switch should be executed
for vdisk_id in [3, 4]:
StorageDriverClient.catch_up[vdisks[vdisk_id].volume_id] = 50
configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 5}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.2', 'port': 5}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
['10.0.0.2', 2, 1, 0, 2, 50.0],
['10.0.0.3', 3, 2, 5, 10, 70.0],
['10.0.0.4', 4, 2, 5, 10, 70.0],
['10.0.0.2', 5, 1, 1, 10, 20.0]]
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# Validate whether a volume migration makes the master follow
StorageDriverClient.vrouter_id[vdisks[1].volume_id] = storagedrivers[3].storagedriver_id
configs = [[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.2', 'port': 5}],
[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 5}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
[{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.2', 'port': 5}],
[{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
loads = [['10.0.0.1', 1, 1, 6, 10, 70.0],
['10.0.0.2', 2, 1, 0, 2, 50.0],
['10.0.0.3', 3, 3, 4, 10, 70.0],
['10.0.0.4', 4, 2, 4, 10, 60.0],
['10.0.0.2', 5, 1, 2, 10, 30.0]]
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
# Validates if a second run doesn't change anything
for vdisk_id in sorted(vdisks.keys()):
MDSServiceController.ensure_safety(vdisks[vdisk_id])
_check_reality(configs, loads, vdisks, mds_services)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(MDSServices)
unittest.TextTestRunner(verbosity=2).run(suite) | 0.459561 | 0.294481 |
from multiprocessing.sharedctypes import Value
from pprint import pprint
import cv2
import numpy as np
from functools import cached_property
from itertools import count
from collections import defaultdict
from skimage.metrics import structural_similarity as ssim
from tuning import hyperparams
# Used to assign unique ids to the entity.
counter = count()
class Tracker:
'''
Singleton in charge of tracking objects moving in the scenes, by associating newly detected blobs
with the previously detected entities.
'''
def __init__(self, context):
'''
Parameters:
- context: The parent IntrusionDetection object, which some global scene information
'''
self.context = context
self.entities_by_frame = defaultdict(list)
def get_entities(self, frame_index) -> list['Entity']:
'''
Retrieve all entities detected in a particular frame
'''
return self.entities_by_frame[frame_index]
def track_frame(self, blobs: list['Blob'], frame_index: int, frame,
bg_diff):
'''
Tries to associate newly detected blobs with previous entities.
In doing so, it updates the classification scores by running an heuristic function.
Parameters:
- blobs: A list of the newly detected Blob objects
- frame_index: the index of the current frame
- frame: The current frame image
- bg_diff: The difference between the current frame and the detected background
'''
current_entities = []
for b in blobs:
for e in self.entities_by_frame[frame_index - 1]:
if b.similar(e.blob) and e not in current_entities:
e.update(b)
current_entities.append(e)
break
else:
current_entities.append(Entity(b, frame_index))
for e in current_entities:
e.compute_scores(frame, bg_diff, self.context.bg)
self.entities_by_frame[frame_index] = current_entities
return self.entities_by_frame[frame_index]
class Entity(object):
'''
Track the evolution of a single blob over time.
'''
@property
def blob(self) -> 'Blob':
'''
The latest blob object associated with this entity.
'''
return self._blob
@property
def speedX(self) -> float:
'''
How much the blob's centroid has moved along the X axis.
'''
return self._speedX
@property
def speedY(self) -> float:
'''
How much the blob's centroid has moved along the Y axis.
'''
return self._speedY
@property
def color(self):
'''
A utility function used to label the detected object's class with color (BGR).
Returns:
- An RGB triplet: Blue for a person class, Red for a false object, Green for a true object.
'''
if self.classification == 'person':
return (255, 0, 0)
if self.classification == 'true_o':
return (0, 255, 0)
if self.classification == 'false_o':
return (0, 0, 255)
else:
return ValueError('Class not found')
@property
def id(self) -> str:
'''
The unique id associated with the entity.
The id is in the format #FXXXX_YYYY where XXXX corresponds to the frame index where the entity
is initially detected, and YYYY is a progressive unique integer.
'''
return self._id
def __init__(self, blob: 'Blob', frameNumber):
self._id = f'#F{frameNumber:04}_{counter.__next__():04}'
# print('Creating Entity', self._id)
self._blob = blob
self._prev_blob = blob
self._speedX = 0.0
self._speedY = 0.0
self._personScore = 0.0
self._objectScore = 0.0
@property
def classification(self) -> str:
'''
Returns the detected class of the entity.
'''
if self._personScore > 0:
return 'person'
elif self._objectScore > 0:
return 'true_o'
else:
return 'false_o'
@staticmethod
def CSVHeader() -> str:
return 'id; area; perimeter; aspect; centroidX; centroidY; boundingRectX; boundingRectY; boundingRectH; boundingRectW; classification'
def toCSV(self, sep='; ') -> str:
'''
Return all the geometric properties related to the entity's shape.
'''
return sep.join([self.id] + list(
map(lambda x: str(int(1000 * x) / (1000.0)), [
self.blob.area,
self.blob.perimeter,
self.blob.aspect,
self.blob.centroid[0],
self.blob.centroid[1],
])) + [
str(self.blob.bounding_rect)[1:-1].replace(', ', sep),
self.classification
])
def __repr__(self) -> str:
return f'{self._id}\tclass: {self.classification}\tps: {self._personScore}\tos: {self._objectScore}'
def update(self, blob: 'Blob'):
'''
Evolve the entity's history with a new blob, and recompute it's speed.
'''
self._prev_blob = self._blob
self._blob = blob
self._speedX = self._blob.centroid[0] - self._prev_blob.centroid[0]
self._speedY = self._blob.centroid[1] - self._prev_blob.centroid[1]
def compute_scores(self, frame, diff, bg):
'''
Update the classification scores with the heuristic.
'''
if abs(self.speedX) > 1 or abs(self.speedY) > 1:
self._personScore = min(self._personScore + 1, 40)
else:
self._personScore = max(self._personScore - 1, -40)
if self._check_contour_similarity(frame, diff):
self._objectScore = min(self._objectScore + 1, 40)
else:
self._objectScore = max(self._objectScore - 1, -40)
def _check_contour_similarity(self,
frame,
diff,
match_threshold=0.2) -> bool:
'''
Private function used to detect true or false object, by using the structural similarity index measure.
Parameters:
- frame: the original frame
- diff: the frame after background subtraction
Returns:
- True if the detected similarity greater or equal to the provided threshold.
'''
x, y, w, h = self.blob.bounding_rect
if (w < 7 or h < 7):
return False
frame_roi = frame[y:y + h, x:x + w, 0]
diff_roi = diff[y:y + h, x:x + w]
ssim_score = ssim(frame_roi,
diff_roi,
data_range=diff_roi.max() - diff_roi.min())
return ssim_score >= match_threshold
class Blob:
'''
Represents a single blob, without making any temporal assumption.
'''
@cached_property
def aspect(self):
x, y, w, h = cv2.boundingRect(self._contour)
return float(w) / h
@cached_property
def solidity(self):
area = cv2.contourArea(self._contour)
hull = cv2.convexHull(self._contour)
hull_area = cv2.contourArea(hull)
return float(area) / (hull_area + 0.00001)
@cached_property
def extent(self):
area = cv2.contourArea(self._contour)
_, _, w, h = cv2.boundingRect(self._contour)
return float(area) / (w * h)
@cached_property
def moments(self):
return cv2.moments(self._contour)
@cached_property
def centroid(self):
return (self.moments['m10'] / (self.moments['m00'] + 0.00001),
self.moments['m01'] / (self.moments['m00'] + 0.00001))
@cached_property
def perimeter(self):
return cv2.arcLength(self._contour, True)
@cached_property
def area(self):
return cv2.contourArea(self._contour)
@cached_property
def bounding_rect(self):
return cv2.boundingRect(self._contour)
@property
def contour(self):
return self._contour
def corner(self, corner_code):
'''
Obtain the coordinates of a corner of the bounding rect.
Parameters:
- corner_code: Must be one of 'tl', 'tr', 'bl', 'br' (t = top, b = bottom, l = left, r = right)
'''
x, y, h, w = self.bounding_rect
if corner_code == 'br':
return (x + w, y + h)
elif corner_code == 'bl':
return (x, y + h)
elif corner_code == 'tl':
return (x, y)
elif corner_code == 'tr':
return (x + w, y)
else:
raise ValueError('Expected one of (tl, tr, bl, br)')
def intersection_area(self, other):
'''
Compute the intersection area between this blob's contour and another one.
'''
sx, sy = self.corner(corner_code='br')
ox, oy = other.corner(corner_code='br')
blank = np.zeros((max(sy, oy), max(sx, ox)))
image1 = cv2.drawContours(blank.copy(), [self.contour],
0,
1,
thickness=cv2.FILLED)
image2 = cv2.drawContours(blank.copy(), [other.contour],
0,
1,
thickness=cv2.FILLED)
intersectionArea = cv2.countNonZero(cv2.bitwise_and(image1, image2))
return intersectionArea
def similar(
    self,
    other: 'Blob',
    intersection_threshold: float = None,
    aspect_threshold: float = None,
    extent_threshold: float = None,
    solidity_threshold: float = None,
):
    '''
    Check if the provided blob is similar to the current one.

    Two blobs are similar when their filled contours overlap by more than
    intersection_threshold pixels AND their aspect, extent and solidity
    each differ by at most the corresponding threshold.

    Thresholds default to the hyperparams['tracking'] values. They are now
    read at call time (late binding) instead of once at import time, so
    configuration changes take effect without re-importing this module;
    explicitly passed arguments behave exactly as before.
    '''
    config = hyperparams['tracking']
    if intersection_threshold is None:
        intersection_threshold = config['intersection_threshold']
    if aspect_threshold is None:
        aspect_threshold = config['aspect_threshold']
    if extent_threshold is None:
        extent_threshold = config['extent_threshold']
    if solidity_threshold is None:
        solidity_threshold = config['solidity_threshold']
    intersection = self.intersection_area(other)
    return all([
        intersection > intersection_threshold,
        abs(self.aspect - other.aspect) <= aspect_threshold,
        abs(self.extent - other.extent) <= extent_threshold,
        abs(self.solidity - other.solidity) <= solidity_threshold,
    ])
def __str__(self) -> str:
    '''Human-readable summary: centroid and area of the blob.'''
    return f'Centroid: {self.centroid}\tArea: {self.area}'
def __repr__(self) -> str:
    '''Same format as __str__ (centroid and area).'''
    return f'Centroid: {self.centroid}\tArea: {self.area}'
def __init__(self, contour):
'''
Parameters:
- contour: the blob's contour
'''
self._contour = contour | lib.py | from multiprocessing.sharedctypes import Value
from pprint import pprint
import cv2
import numpy as np
from functools import cached_property
from itertools import count
from collections import defaultdict
from skimage.metrics import structural_similarity as ssim
from tuning import hyperparams
# Used to assign unique ids to the entity.
counter = count()
class Tracker:
    '''
    Singleton in charge of tracking objects moving in the scenes, by associating newly detected blobs
    with the previously detected entities.
    '''
    def __init__(self, context):
        '''
        Parameters:
        - context: The parent IntrusionDetection object, which carries some global scene information
          (its `bg` attribute is read in track_frame).
        '''
        self.context = context
        # Maps frame index -> list of Entity objects alive in that frame.
        self.entities_by_frame = defaultdict(list)
    def get_entities(self, frame_index) -> list['Entity']:
        '''
        Retrieve all entities detected in a particular frame
        '''
        return self.entities_by_frame[frame_index]
    def track_frame(self, blobs: list['Blob'], frame_index: int, frame,
                    bg_diff):
        '''
        Tries to associate newly detected blobs with previous entities.
        In doing so, it updates the classification scores by running an heuristic function.
        Parameters:
        - blobs: A list of the newly detected Blob objects
        - frame_index: the index of the current frame
        - frame: The current frame image
        - bg_diff: The difference between the current frame and the detected background
        Returns:
        - The list of Entity objects alive in frame_index.
        '''
        current_entities = []
        for b in blobs:
            # Greedy matching: adopt the first previous-frame entity whose
            # blob is similar and that has not already been matched.
            for e in self.entities_by_frame[frame_index - 1]:
                if b.similar(e.blob) and e not in current_entities:
                    e.update(b)
                    current_entities.append(e)
                    break
            else:
                # for/else: no previous entity matched (loop finished
                # without break), so start tracking a brand-new entity.
                current_entities.append(Entity(b, frame_index))
        for e in current_entities:
            e.compute_scores(frame, bg_diff, self.context.bg)
        self.entities_by_frame[frame_index] = current_entities
        return self.entities_by_frame[frame_index]
class Entity(object):
    '''
    Track the evolution of a single blob over time.
    '''
    @property
    def blob(self) -> 'Blob':
        '''
        The latest blob object associated with this entity.
        '''
        return self._blob
    @property
    def speedX(self) -> float:
        '''
        How much the blob's centroid has moved along the X axis.
        '''
        return self._speedX
    @property
    def speedY(self) -> float:
        '''
        How much the blob's centroid has moved along the Y axis.
        '''
        return self._speedY
    @property
    def color(self):
        '''
        A utility function used to label the detected object's class with color (BGR).
        Returns:
        - A BGR triplet: Blue for a person class, Green for a true object, Red for a false object.
        Raises:
        - ValueError: if the classification is not one of the known classes.
        '''
        if self.classification == 'person':
            return (255, 0, 0)
        if self.classification == 'true_o':
            return (0, 255, 0)
        if self.classification == 'false_o':
            return (0, 0, 255)
        # BUG FIX: the exception was previously *returned* instead of
        # raised, silently handing callers a ValueError instance.
        raise ValueError('Class not found')
    @property
    def id(self) -> str:
        '''
        The unique id associated with the entity.
        The id is in the format #FXXXX_YYYY where XXXX corresponds to the frame index where the entity
        is initially detected, and YYYY is a progressive unique integer.
        '''
        return self._id
    def __init__(self, blob: 'Blob', frameNumber):
        '''
        Parameters:
        - blob: the initial Blob detection for this entity
        - frameNumber: index of the frame in which the entity first appears
        '''
        # next() is the idiomatic way to advance an iterator
        # (rather than calling counter.__next__() directly).
        self._id = f'#F{frameNumber:04}_{next(counter):04}'
        self._blob = blob
        self._prev_blob = blob
        self._speedX = 0.0
        self._speedY = 0.0
        # Classification scores, clamped to [-40, 40] by compute_scores().
        self._personScore = 0.0
        self._objectScore = 0.0
    @property
    def classification(self) -> str:
        '''
        Returns the detected class of the entity: 'person' when the person
        score is positive, else 'true_o' when the object score is positive,
        else 'false_o'.
        '''
        if self._personScore > 0:
            return 'person'
        elif self._objectScore > 0:
            return 'true_o'
        else:
            return 'false_o'
    @staticmethod
    def CSVHeader() -> str:
        '''Header row matching the columns produced by toCSV().'''
        # BUG FIX: bounding_rect is (x, y, w, h), so the header must list
        # W before H to match the data toCSV() emits.
        return 'id; area; perimeter; aspect; centroidX; centroidY; boundingRectX; boundingRectY; boundingRectW; boundingRectH; classification'
    def toCSV(self, sep='; ') -> str:
        '''
        Return all the geometric properties related to the entity's shape.
        '''
        # int(1000 * x) / 1000.0 truncates each value to 3 decimals.
        return sep.join([self.id] + list(
            map(lambda x: str(int(1000 * x) / (1000.0)), [
                self.blob.area,
                self.blob.perimeter,
                self.blob.aspect,
                self.blob.centroid[0],
                self.blob.centroid[1],
            ])) + [
                str(self.blob.bounding_rect)[1:-1].replace(', ', sep),
                self.classification
            ])
    def __repr__(self) -> str:
        return f'{self._id}\tclass: {self.classification}\tps: {self._personScore}\tos: {self._objectScore}'
    def update(self, blob: 'Blob'):
        '''
        Evolve the entity's history with a new blob, and recompute its speed.
        '''
        self._prev_blob = self._blob
        self._blob = blob
        self._speedX = self._blob.centroid[0] - self._prev_blob.centroid[0]
        self._speedY = self._blob.centroid[1] - self._prev_blob.centroid[1]
    def compute_scores(self, frame, diff, bg):
        '''
        Update the classification scores with the heuristic:
        - entities that moved more than one pixel accumulate person score;
        - entities whose ROI structurally matches the background difference
          accumulate (true) object score.
        Both scores are clamped to [-40, 40]. (`bg` is currently unused
        but kept for interface compatibility with callers.)
        '''
        if abs(self.speedX) > 1 or abs(self.speedY) > 1:
            self._personScore = min(self._personScore + 1, 40)
        else:
            self._personScore = max(self._personScore - 1, -40)
        if self._check_contour_similarity(frame, diff):
            self._objectScore = min(self._objectScore + 1, 40)
        else:
            self._objectScore = max(self._objectScore - 1, -40)
    def _check_contour_similarity(self,
                                  frame,
                                  diff,
                                  match_threshold=0.2) -> bool:
        '''
        Private function used to detect true or false object, by using the structural similarity index measure.
        Parameters:
        - frame: the original frame
        - diff: the frame after background subtraction
        Returns:
        - True if the detected similarity is greater or equal to the provided threshold.
        '''
        x, y, w, h = self.blob.bounding_rect
        # ssim uses a 7x7 window by default, so smaller ROIs cannot be scored.
        if (w < 7 or h < 7):
            return False
        frame_roi = frame[y:y + h, x:x + w, 0]
        diff_roi = diff[y:y + h, x:x + w]
        ssim_score = ssim(frame_roi,
                          diff_roi,
                          data_range=diff_roi.max() - diff_roi.min())
        return ssim_score >= match_threshold
class Blob:
    '''
    Represents a single blob, without making any temporal assumption.

    All geometric descriptors are exposed as cached properties: each is
    computed from the contour on first access and memoized on the instance.
    '''
    @cached_property
    def aspect(self):
        '''Width / height ratio of the bounding rect.'''
        x, y, w, h = cv2.boundingRect(self._contour)
        return float(w) / h
    @cached_property
    def solidity(self):
        '''Ratio between the contour area and its convex-hull area.'''
        area = cv2.contourArea(self._contour)
        hull = cv2.convexHull(self._contour)
        hull_area = cv2.contourArea(hull)
        # Epsilon keeps the division safe for degenerate (zero-area) hulls.
        return float(area) / (hull_area + 0.00001)
    @cached_property
    def extent(self):
        '''Ratio between the contour area and its bounding-rect area.'''
        area = cv2.contourArea(self._contour)
        _, _, w, h = cv2.boundingRect(self._contour)
        return float(area) / (w * h)
    @cached_property
    def moments(self):
        '''Image moments of the contour (see cv2.moments).'''
        return cv2.moments(self._contour)
    @cached_property
    def centroid(self):
        '''(x, y) centre of mass; epsilon avoids division by zero when m00 is 0.'''
        return (self.moments['m10'] / (self.moments['m00'] + 0.00001),
                self.moments['m01'] / (self.moments['m00'] + 0.00001))
    @cached_property
    def perimeter(self):
        '''Arc length of the contour, treated as closed.'''
        return cv2.arcLength(self._contour, True)
    @cached_property
    def area(self):
        '''Area enclosed by the contour.'''
        return cv2.contourArea(self._contour)
    @cached_property
    def bounding_rect(self):
        '''Upright bounding rectangle as an (x, y, w, h) tuple.'''
        return cv2.boundingRect(self._contour)
    @property
    def contour(self):
        '''The raw contour this blob was built from.'''
        return self._contour
    def corner(self, corner_code):
        '''
        Obtain the coordinates of a corner of the bounding rect.
        Parameters:
        - corner_code: Must be one of 'tl', 'tr', 'bl', 'br' (t = top, b = bottom, l = left, r = right)
        Raises:
        - ValueError: for any other corner code.
        '''
        # BUG FIX: cv2.boundingRect returns (x, y, w, h); the previous
        # unpacking read (x, y, h, w), swapping width and height for every
        # non-square blob (compare with `aspect` above).
        x, y, w, h = self.bounding_rect
        if corner_code == 'br':
            return (x + w, y + h)
        elif corner_code == 'bl':
            return (x, y + h)
        elif corner_code == 'tl':
            return (x, y)
        elif corner_code == 'tr':
            return (x + w, y)
        else:
            raise ValueError('Expected one of (tl, tr, bl, br)')
    def intersection_area(self, other):
        '''
        Compute the intersection area (in pixels) between this blob's
        contour and another one.
        '''
        sx, sy = self.corner(corner_code='br')
        ox, oy = other.corner(corner_code='br')
        # uint8 canvas: OpenCV's drawing and bitwise primitives are meant
        # for integer images, and it is 8x smaller than the default float64.
        blank = np.zeros((max(sy, oy), max(sx, ox)), dtype=np.uint8)
        image1 = cv2.drawContours(blank.copy(), [self.contour], 0, 1,
                                  thickness=cv2.FILLED)
        image2 = cv2.drawContours(blank.copy(), [other.contour], 0, 1,
                                  thickness=cv2.FILLED)
        return cv2.countNonZero(cv2.bitwise_and(image1, image2))
    def similar(
        self,
        other: 'Blob',
        intersection_threshold: float = None,
        aspect_threshold: float = None,
        extent_threshold: float = None,
        solidity_threshold: float = None,
    ):
        '''
        Check if the provided blob is similar to the current one.

        Two blobs are similar when their filled contours overlap by more
        than intersection_threshold pixels AND their aspect, extent and
        solidity each differ by at most the corresponding threshold.

        Thresholds default to hyperparams['tracking'] values, read at call
        time (late binding) instead of once at import time, so config
        changes take effect immediately; explicit arguments are unchanged.
        '''
        config = hyperparams['tracking']
        if intersection_threshold is None:
            intersection_threshold = config['intersection_threshold']
        if aspect_threshold is None:
            aspect_threshold = config['aspect_threshold']
        if extent_threshold is None:
            extent_threshold = config['extent_threshold']
        if solidity_threshold is None:
            solidity_threshold = config['solidity_threshold']
        intersection = self.intersection_area(other)
        return all([
            intersection > intersection_threshold,
            abs(self.aspect - other.aspect) <= aspect_threshold,
            abs(self.extent - other.extent) <= extent_threshold,
            abs(self.solidity - other.solidity) <= solidity_threshold,
        ])
    def __str__(self) -> str:
        return f'Centroid: {self.centroid}\tArea: {self.area}'
    def __repr__(self) -> str:
        return f'Centroid: {self.centroid}\tArea: {self.area}'
    def __init__(self, contour):
        '''
        Parameters:
        - contour: the blob's contour
        '''
        self._contour = contour
import numpy as np
import warnings
class ObjectiveFunction( object ):
    """
    The `ObjectiveFunction` class defines an objective function.

    Attributes:
        definition: callable evaluating the objective for a list of parameters.
        parameters: registered ObjectiveFunctionParameter objects.
        global_minimum: known global minimum, if any.
        subject_to: constraint callables returning booleans.
    """
    def __init__(self, definition=None):
        self.definition = definition
        self.parameters = []
        self.global_minimum = None
        self.subject_to = []
    def add_parameter(self, parameter):
        """Register a parameter; warns and ignores non-ObjectiveFunctionParameter values."""
        if not isinstance(parameter, ObjectiveFunctionParameter):
            warnings.warn('Please use the ObjectiveFunctionParameter class.')
            return
        self.parameters.append(parameter)
        self.update_dimension()
    def add_subject(self, subject):
        """Register a constraint; warns and ignores non-callable values."""
        if not callable(subject):
            warnings.warn('Subject must be a callable function returning a boolean.')
            return
        self.subject_to.append(subject)
    def update_dimension(self):
        """Keep `dimension` in sync with the number of parameters."""
        self.dimension = len(self.parameters)
    def evaluate(self, parameters=None):
        """Evaluate the objective; falls back to self.parameters when none given.

        BUG FIX: the default was a shared mutable list ([]); it is now the
        None sentinel, with identical fall-back behaviour (both are falsy).
        """
        if not parameters:
            parameters = self.parameters
        return self.definition(parameters)
    def determine_feasibility(self, parameters=None):
        """Return the fraction (0..1) of satisfied parameter bounds and constraints."""
        if not parameters:
            parameters = self.parameters
        feasibility = []
        for parameter in parameters:
            feasibility.append(parameter.is_feasible())
        for subject in self.subject_to:
            feasibility.append(subject(parameters))
        return np.mean(feasibility)
class MultiObjectiveFunction( ObjectiveFunction ):
    """
    The `MultiObjectiveFunction` class defines various objective functions.

    It aggregates several ObjectiveFunction instances and evaluates them
    together, returning one value per sub-objective.
    """
    def __init__(self):
        super(MultiObjectiveFunction, self).__init__()
        self.objective_functions = []
    def add_objective_function(self, objective_function):
        """Register a sub-objective; warns and ignores non-ObjectiveFunction values."""
        if not isinstance(objective_function, ObjectiveFunction):
            warnings.warn('Please use the ObjectiveFunction class.')
            return
        self.objective_functions.append(objective_function)
        self.update_objective_dimension()
    def update_objective_dimension(self):
        """Keep `objective_dimension` in sync with the number of sub-objectives."""
        self.objective_dimension = len(self.objective_functions)
    def evaluate(self, parameters=None):
        """Evaluate every sub-objective; returns a list of objective values.

        BUG FIX: the default was a shared mutable list ([]); it is now the
        None sentinel, with identical fall-back behaviour.
        """
        if not parameters:
            parameters = self.parameters
        f = []
        for objective_function in self.objective_functions:
            f.append(objective_function.evaluate(parameters=parameters))
        return f
    def determine_feasibility(self, parameters=None):
        """Return the mean feasibility (0..1) over own constraints and all sub-objectives."""
        if not parameters:
            parameters = self.parameters
        feasibility = []
        for parameter in parameters:
            feasibility.append(parameter.is_feasible())
        for subject in self.subject_to:
            feasibility.append(subject(parameters))
        # Own feasibility counts once, then one entry per sub-objective.
        feasibility = [np.mean(feasibility)]
        for objective_function in self.objective_functions:
            feasibility.append(objective_function.determine_feasibility(parameters=parameters))
        return np.mean(feasibility)
class ObjectiveFunctionParameter( object ):
    """
    The `ObjectiveFunctionParameter` class defines a parameter for
    the ObjectiveFunction.

    The parameter keeps three coupled representations in sync:
    - mapping_parameter: a float in [0, 1] (for binary optimizers),
    - value: mapping(mapping_parameter), the value used by the objective,
    - code: fixed-point binary expansion of mapping_parameter on `bits` bits.
    """
    def __init__(self,
                 subject_to=lambda x: (x > -np.inf and x < np.inf),
                 mapping=lambda x: x):
        """
        Parameters:
        - subject_to: feasibility predicate applied to `value`.
        - mapping: maps the [0, 1] mapping parameter onto the parameter's domain.
        """
        self.subject_to = subject_to
        self.mapping = mapping
        self.value = None
        self.mapping_parameter = None  # between 0 and 1 (for binary optimizers)
        self.true_value = None
        self.bits = 8  # resolution of the binary representation
        self.code = None
        self.label = ''
    def set_random_value(self):
        """Draw a uniform random mapping parameter and sync value and code."""
        self.mapping_parameter = np.random.random()
        self.value = self.mapping(self.mapping_parameter)
        self.update_binary_representation()
    def set_mapping_parameter(self, value):
        """Set the mapping parameter (expected in [0, 1]) and sync value and code."""
        self.mapping_parameter = value
        self.value = self.mapping(self.mapping_parameter)
        self.update_binary_representation()
    def set_binary_code(self, code):
        """Set the binary code and recompute mapping parameter and value from it."""
        self.code = code
        self.update_values()
    def update_values(self):
        # Fixed-point decoding: sum(code[i] * 2**-(i+1)).
        # https://de.mathworks.com/matlabcentral/answers/25549-convert-floating-point-to-binary
        self.mapping_parameter = np.dot(self.code, [2.0**p for p in np.arange(-1, -(self.bits + 1), -1)])
        self.value = self.mapping(self.mapping_parameter)
    def update_binary_representation(self):
        # Fixed-point encoding of mapping_parameter on `bits` bits.
        # https://de.mathworks.com/matlabcentral/answers/25549-convert-floating-point-to-binary
        self.code = [np.fix(np.fmod(self.mapping_parameter * 2.0**p, 2)) for p in np.arange(1, self.bits + 1)]
    def is_feasible(self):
        """True when `value` satisfies the subject_to constraint."""
        # BUG FIX: restores the return statement that was truncated by
        # corrupted trailing data in this file.
        return self.subject_to(self.value)
import warnings
class ObjectiveFunction( object ):
    """
    The `ObjectiveFunction` class defines an objective function.

    Attributes:
        definition: callable evaluating the objective for a list of parameters.
        parameters: registered ObjectiveFunctionParameter objects.
        global_minimum: known global minimum, if any.
        subject_to: constraint callables returning booleans.
    """
    def __init__(self, definition=None):
        self.definition = definition
        self.parameters = []
        self.global_minimum = None
        self.subject_to = []
    def add_parameter(self, parameter):
        """Register a parameter; warns and ignores non-ObjectiveFunctionParameter values."""
        if not isinstance(parameter, ObjectiveFunctionParameter):
            warnings.warn('Please use the ObjectiveFunctionParameter class.')
            return
        self.parameters.append(parameter)
        self.update_dimension()
    def add_subject(self, subject):
        """Register a constraint; warns and ignores non-callable values."""
        if not callable(subject):
            warnings.warn('Subject must be a callable function returning a boolean.')
            return
        self.subject_to.append(subject)
    def update_dimension(self):
        """Keep `dimension` in sync with the number of parameters."""
        self.dimension = len(self.parameters)
    def evaluate(self, parameters=None):
        """Evaluate the objective; falls back to self.parameters when none given.

        BUG FIX: the default was a shared mutable list ([]); it is now the
        None sentinel, with identical fall-back behaviour (both are falsy).
        """
        if not parameters:
            parameters = self.parameters
        return self.definition(parameters)
    def determine_feasibility(self, parameters=None):
        """Return the fraction (0..1) of satisfied parameter bounds and constraints."""
        if not parameters:
            parameters = self.parameters
        feasibility = []
        for parameter in parameters:
            feasibility.append(parameter.is_feasible())
        for subject in self.subject_to:
            feasibility.append(subject(parameters))
        return np.mean(feasibility)
class MultiObjectiveFunction( ObjectiveFunction ):
    """
    The `MultiObjectiveFunction` class defines various objective functions.

    It aggregates several ObjectiveFunction instances and evaluates them
    together, returning one value per sub-objective.
    """
    def __init__(self):
        super(MultiObjectiveFunction, self).__init__()
        self.objective_functions = []
    def add_objective_function(self, objective_function):
        """Register a sub-objective; warns and ignores non-ObjectiveFunction values."""
        if not isinstance(objective_function, ObjectiveFunction):
            warnings.warn('Please use the ObjectiveFunction class.')
            return
        self.objective_functions.append(objective_function)
        self.update_objective_dimension()
    def update_objective_dimension(self):
        """Keep `objective_dimension` in sync with the number of sub-objectives."""
        self.objective_dimension = len(self.objective_functions)
    def evaluate(self, parameters=None):
        """Evaluate every sub-objective; returns a list of objective values.

        BUG FIX: the default was a shared mutable list ([]); it is now the
        None sentinel, with identical fall-back behaviour.
        """
        if not parameters:
            parameters = self.parameters
        f = []
        for objective_function in self.objective_functions:
            f.append(objective_function.evaluate(parameters=parameters))
        return f
    def determine_feasibility(self, parameters=None):
        """Return the mean feasibility (0..1) over own constraints and all sub-objectives."""
        if not parameters:
            parameters = self.parameters
        feasibility = []
        for parameter in parameters:
            feasibility.append(parameter.is_feasible())
        for subject in self.subject_to:
            feasibility.append(subject(parameters))
        # Own feasibility counts once, then one entry per sub-objective.
        feasibility = [np.mean(feasibility)]
        for objective_function in self.objective_functions:
            feasibility.append(objective_function.determine_feasibility(parameters=parameters))
        return np.mean(feasibility)
class ObjectiveFunctionParameter( object ):
    """
    The `ObjectiveFunctionParameter` class defines a parameter for
    the ObjectiveFunction.

    The parameter keeps three coupled representations in sync:
    - mapping_parameter: a float in [0, 1] (for binary optimizers),
    - value: mapping(mapping_parameter), the value used by the objective,
    - code: fixed-point binary expansion of mapping_parameter on `bits` bits.
    """
    def __init__(self,
                 subject_to=lambda x: (x > -np.inf and x < np.inf),
                 mapping=lambda x: x):
        """
        Parameters:
        - subject_to: feasibility predicate applied to `value`.
        - mapping: maps the [0, 1] mapping parameter onto the parameter's domain.
        """
        self.subject_to = subject_to
        self.mapping = mapping
        self.value = None
        self.mapping_parameter = None  # between 0 and 1 (for binary optimizers)
        self.true_value = None
        self.bits = 8  # resolution of the binary representation
        self.code = None
        self.label = ''
    def set_random_value(self):
        """Draw a uniform random mapping parameter and sync value and code."""
        self.mapping_parameter = np.random.random()
        self.value = self.mapping(self.mapping_parameter)
        self.update_binary_representation()
    def set_mapping_parameter(self, value):
        """Set the mapping parameter (expected in [0, 1]) and sync value and code."""
        self.mapping_parameter = value
        self.value = self.mapping(self.mapping_parameter)
        self.update_binary_representation()
    def set_binary_code(self, code):
        """Set the binary code and recompute mapping parameter and value from it."""
        self.code = code
        self.update_values()
    def update_values(self):
        # Fixed-point decoding: sum(code[i] * 2**-(i+1)).
        # https://de.mathworks.com/matlabcentral/answers/25549-convert-floating-point-to-binary
        self.mapping_parameter = np.dot(self.code, [2.0**p for p in np.arange(-1, -(self.bits + 1), -1)])
        self.value = self.mapping(self.mapping_parameter)
    def update_binary_representation(self):
        # Fixed-point encoding of mapping_parameter on `bits` bits.
        # https://de.mathworks.com/matlabcentral/answers/25549-convert-floating-point-to-binary
        self.code = [np.fix(np.fmod(self.mapping_parameter * 2.0**p, 2)) for p in np.arange(1, self.bits + 1)]
    def is_feasible(self):
        """True when `value` satisfies the subject_to constraint."""
        # BUG FIX: restores the return statement that was truncated by
        # corrupted trailing data in this file.
        return self.subject_to(self.value)
from __future__ import division
import math
import types
import torch
import utils
import random
import numbers
import numpy as np
import scipy as sp
from scipy import misc
from PIL import Image, ImageOps, ImageDraw
class Compose(object):
    """Composes several transforms together.

    Args:
        transforms (List[Transform]): list of transforms to compose.

    Example:
        >>> transforms.Compose([
        >>>     transforms.CenterCrop(10),
        >>>     transforms.ToTensor(),
        >>> ])
    """
    def __init__(self, transforms):
        self.transforms = transforms
    def __call__(self, input):
        # Thread the sample through every transform, in registration order.
        result = input
        for transform in self.transforms:
            result = transform(result)
        return result
class ToTensor(object):
    """Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range
    [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].

    NOTE(review): the conversion is applied to *every* entry of the sample
    dict, and only the PIL branch rescales to [0, 1] / permutes to CHW;
    ndarray entries (e.g. landmark/heatmap arrays) are converted as-is —
    confirm that is intentional for all keys."""
    def __call__(self, input):
        for key in input.keys():
            value = input[key]
            if isinstance(value, np.ndarray):
                # handle numpy array
                input[key] = torch.from_numpy(value)
            else:
                # handle PIL Image: copy the raw bytes into a byte tensor,
                # then reinterpret as H x W x C
                tmp = torch.ByteTensor(torch.ByteStorage.from_buffer(value.tobytes()))
                value = tmp.view(value.size[1], value.size[0], len(value.mode))
                # put it from HWC to CHW format
                # yikes, this transpose takes 80% of the loading time/CPU
                value = value.transpose(0, 1).transpose(0, 2).contiguous()
                input[key] = value.float().div(255)
        return input
class ToPILImage(object):
    """Converts a torch.*Tensor of range [0, 1] and shape C x H x W
    or numpy ndarray of dtype=uint8, range[0, 255] and shape H x W x C
    to a PIL.Image of range [0, 255]

    Only the 'img' entry of the sample dict is converted; other entries
    are left untouched.
    """
    def __call__(self, input):
        if isinstance(input['img'], np.ndarray):
            # handle numpy array
            input['img'] = Image.fromarray(input['img'])
        else:
            # Tensor path: scale [0, 1] -> [0, 255], cast to bytes and
            # reorder CHW -> HWC before building the PIL image.
            npimg = input['img'].mul(255).byte().numpy()
            npimg = np.transpose(npimg, (1,2,0))
            input['img'] = Image.fromarray(npimg)
        return input
class Normalize(object):
    """Given mean: (R, G, B) and std: (R, G, B),
    will normalize each channel of the torch.*Tensor, i.e.
    channel = (channel - mean) / std
    """
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std
    def __call__(self, input):
        # TODO: make efficient
        # In-place per-channel normalization of the 'img' tensor.
        for channel, channel_mean, channel_std in zip(input['img'], self.mean, self.std):
            channel.sub_(channel_mean).div_(channel_std)
        return input
class Scale(object):
    """Rescales the input PIL.Image to the given 'size'.
    'size' will be the size of the smaller edge.
    For example, if height > width, then image will be
    rescaled to (size * height / width, size)
    size: size of the smaller edge
    interpolation: Default: PIL.Image.BILINEAR
    """
    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation
    def __call__(self, input):
        w, h = input['img'].size
        # Smaller edge already matches the target: nothing to do.
        if (w <= h and w == self.size) or (h <= w and h == self.size):
            return input
        # Resize so the smaller edge becomes self.size, preserving aspect.
        if w < h:
            new_w, new_h = self.size, int(self.size * h / w)
        else:
            new_w, new_h = int(self.size * w / h), self.size
        input['img'] = input['img'].resize((new_w, new_h), self.interpolation)
        return input
class CenterCrop(object):
    """Crops the given PIL.Image at the center to have a region of
    the given size. size can be a tuple (target_height, target_width)
    or an integer, in which case the target will be of a square shape (size, size)
    """
    def __init__(self, size):
        # A single number is shorthand for a square crop.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
    def __call__(self, input):
        w, h = input['img'].size
        th, tw = self.size
        left = int(round((w - tw) / 2.))
        top = int(round((h - th) / 2.))
        input['img'] = input['img'].crop((left, top, left + tw, top + th))
        return input
class Pad(object):
    """Pads the given PIL.Image on all sides with the given "pad" value"""
    def __init__(self, padding, fill=0):
        # padding: border width in pixels added on each side;
        # fill: pixel value used for the border.
        assert isinstance(padding, numbers.Number)
        assert isinstance(fill, numbers.Number)
        self.padding = padding
        self.fill = fill
    def __call__(self, input):
        # Only the image is padded; landmark coordinates ('loc') are NOT
        # shifted here — NOTE(review): confirm callers account for the
        # +padding offset.
        input['img'] = ImageOps.expand(input['img'], border=self.padding, fill=self.fill)
        return input
class Lambda(object):
    """Applies a user-supplied lambda to the sample's image ('img' entry)."""
    def __init__(self, lambd):
        assert type(lambd) is types.LambdaType
        self.lambd = lambd
    def __call__(self, input):
        # BUG FIX: previously applied the lambda to the undefined name
        # `img` (NameError at runtime); the image lives in input['img'].
        input['img'] = self.lambd(input['img'])
        return input
class RandomCrop(object):
    """Crops the given PIL.Image at a random location to have a region of
    the given size. size can be a tuple (target_height, target_width)
    or an integer, in which case the target will be of a square shape (size, size)
    """
    def __init__(self, size, padding=0):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.padding = padding
    def __call__(self, input):
        if self.padding > 0:
            # BUG FIX: `img` was undefined here (NameError whenever
            # padding > 0); pad the sample's image instead.
            input['img'] = ImageOps.expand(input['img'], border=self.padding, fill=0)
        w, h = input['img'].size
        th, tw = self.size
        # Image already at target size: no crop needed.
        if w == tw and h == th:
            return input
        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        input['img'] = input['img'].crop((x1, y1, x1 + tw, y1 + th))
        return input
class RandomHorizontalFlip(object):
    """Randomly horizontally flips the given PIL.Image with a probability of 0.5
    """
    def __call__(self, input):
        if random.random() < 0.5:
            input['img'] = input['img'].transpose(Image.FLIP_LEFT_RIGHT)
            input['tgt'] = input['tgt'].transpose(Image.FLIP_LEFT_RIGHT)
            # NOTE(review): this shifts only the x coordinates (row 0 of
            # 'loc') by half the image width; a true horizontal flip of
            # landmarks would be x' = width - 1 - x — confirm intent.
            input['loc'][0] = input['loc'][0] - math.ceil(input['img'].size[0]/2)
        return input
class RandomSizedCrop(object):
    """Random crop the given PIL.Image to a random size of (0.08 to 1.0) of the original size
    and a random aspect ratio of 3/4 to 4/3 of the original aspect ratio
    This is popularly used to train the Inception networks
    size: size of the smaller edge
    interpolation: Default: PIL.Image.BILINEAR
    """
    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation
    def __call__(self, input):
        # Try up to 10 random (area, aspect-ratio) combinations; fall back
        # to a deterministic scale + center crop when none fits the image.
        for attempt in range(10):
            area = input['img'].size[0] * input['img'].size[1]
            target_area = random.uniform(0.08, 1.0) * area
            aspect_ratio = random.uniform(3. / 4, 4. / 3)
            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))
            # Swap w/h half the time so both orientations are sampled.
            if random.random() < 0.5:
                w, h = h, w
            if w <= input['img'].size[0] and h <= input['img'].size[1]:
                x1 = random.randint(0, input['img'].size[0] - w)
                y1 = random.randint(0, input['img'].size[1] - h)
                input['img'] = input['img'].crop((x1, y1, x1 + w, y1 + h))
                assert(input['img'].size == (w, h))
                input['img'] = input['img'].resize((self.size, self.size), self.interpolation)
                return input
        # Fallback
        # NOTE(review): only 'img' is cropped/resized here; landmark
        # entries in the sample are not adjusted — confirm upstream
        # handles this.
        scale = Scale(self.size, interpolation=self.interpolation)
        crop = CenterCrop(self.size)
        return crop(scale(input))
class NormalizeLandmarks(object):
    """Normalize landmark coordinates to the range [-1, 1].

    A coordinate c in [0, xsize] (resp. [0, ysize]) is mapped to
    -1 + 2*c/xsize (resp. -1 + 2*c/ysize).
    """
    def __init__(self, xsize, ysize):
        self.xsize = xsize
        self.ysize = ysize
    def __call__(self, input):
        # BUG FIX: the original divided by the undefined names
        # `inputx_res` / `inputy_res` (NameError at runtime); the target
        # resolution is stored in self.xsize / self.ysize by the
        # constructor. The original also computed the mean of non-zero
        # landmarks and never used it — that dead code has been removed.
        for i in range(input['loc'].shape[0]):
            input['loc'][i][0] = -1 + (input['loc'][i][0] * 2.) / self.xsize
            input['loc'][i][1] = -1 + (input['loc'][i][1] * 2.) / self.ysize
        return input
class AffineCrop(object):
    """Random affine augmentation (rotation, translation, scaling) followed
    by a fixed-size center crop, with landmark remapping and per-landmark
    Gaussian heatmap generation.

    Parameters:
    - nlandmark: number of landmarks (one extra channel is appended for the image center)
    - ix, iy: crop (input) resolution
    - ox, oy: heatmap (output) resolution
    - rangle: max absolute rotation in degrees
    - rscale: max relative scale change
    - rtrans: max absolute translation in pixels
    - gauss: std-dev of the landmark Gaussians
    """
    def __init__(self,nlandmark,ix,iy,ox,oy,rangle=0,rscale=0,rtrans=0,gauss=1):
        self.rangle=rangle
        self.rscale=rscale
        self.rtrans=rtrans
        self.nlandmark=nlandmark
        self.ix = ix
        self.iy = iy
        self.ox = ox
        self.oy = oy
        self.utils = utils
        self.gauss = gauss
    def __call__(self, input):
        # Sample the augmentation parameters uniformly in [-r, r].
        angle = self.rangle*(2*torch.rand(1)[0] - 1)
        grad_angle = angle * math.pi / 180
        scale = 1+self.rscale*(2*torch.rand(1)[0] - 1)
        transx = self.rtrans*(2*torch.rand(1)[0] - 1)
        transy = self.rtrans*(2*torch.rand(1)[0] - 1)
        img = input['img']
        size = img.size
        h, w = size[0], size[1]
        centerX, centerY = int(w/2), int(h/2)
        # perform rotation
        img = img.rotate(angle, Image.BICUBIC)
        # perform translation
        img = img.transform(img.size, Image.AFFINE, (1, 0, transx, 0, 1, transy))
        # perform scaling
        # NOTE(review): Image.ANTIALIAS is deprecated in recent Pillow
        # (use Image.LANCZOS).
        img = img.resize((int(math.ceil(scale*h)) , int(math.ceil(scale*w))) , Image.ANTIALIAS)
        w, h = img.size
        # Center-crop the augmented image to (ix, iy).
        x1 = int(round((w - self.ix) / 2.))
        # NOTE(review): self.ix is used for the vertical offset as well;
        # for non-square crops (ix != iy) this looks like it should be
        # self.iy — confirm.
        y1 = int(round((h - self.ix) / 2.))
        input['img'] = img.crop((x1, y1, x1 + self.ix, y1 + self.iy))
        if (np.sum(input['loc']) != 0):
            occ = input['occ']
            loc = input['loc']
            # Homogeneous coordinates; an extra column carries the image
            # center so it is transformed alongside the landmarks.
            newloc = np.ones((3,loc.shape[1]+1))
            newloc[0:2,0:loc.shape[1]] = loc
            newloc[0,loc.shape[1]] = centerY
            newloc[1,loc.shape[1]] = centerX
            trans_matrix = np.array([[1,0,-1*transx],[0,1,-1*transy],[0,0,1]])
            scale_matrix = np.array([[scale,0,0],[0,scale,0],[0,0,1]])
            angle_matrix = np.array([[math.cos(grad_angle),math.sin(grad_angle),0],[-math.sin(grad_angle),math.cos(grad_angle),0],[0,0,1]])
            # perform rotation (about the image center)
            newloc[0,:] = newloc[0,:] - centerY
            newloc[1,:] = newloc[1,:] - centerX
            newloc = np.dot(angle_matrix, newloc)
            newloc[0,:] = newloc[0,:] + centerY
            newloc[1,:] = newloc[1,:] + centerX
            # perform translation
            newloc = np.dot(trans_matrix, newloc)
            # perform scaling
            newloc = np.dot(scale_matrix, newloc)
            # Shift into crop coordinates.
            newloc[0,:] = newloc[0,:] - y1
            newloc[1,:] = newloc[1,:] - x1
            input['loc'] = newloc[0:2,:]
            # Mark landmarks that fell outside the crop as NaN/occluded.
            # NOTE(review): `x == np.nan` is always False, so this guard
            # never skips anything; np.isnan (as used below for the
            # heatmaps) is the correct test — confirm intent.
            for i in range(input['loc'].shape[1]):
                if ~((input['loc'][0,i] == np.nan) & (input['loc'][1,i] == np.nan)):
                    if ((input['loc'][0,i] < 0) | (input['loc'][0,i] > self.iy) | (input['loc'][1,i] < 0) | (input['loc'][1,i] > self.ix)):
                        input['loc'][:,i] = np.nan
                        input['occ'][i] = 0
        # generate heatmaps
        input['tgt'] = np.zeros((self.nlandmark+1, self.ox, self.oy))
        for i in range(self.nlandmark):
            if (not np.isnan(input['loc'][:,i][0]) and not np.isnan(input['loc'][:,i][1])):
                tmp = self.utils.gaussian(np.array([self.ix,self.iy]),input['loc'][:,i],self.gauss)
                # NOTE(review): scipy.misc.imresize was deprecated in SciPy
                # 1.0 and removed in 1.3 — migrate to skimage.transform
                # or PIL.
                scaled_tmp = sp.misc.imresize(tmp, [self.ox, self.oy])
                # Min-max normalize the resized heatmap to [0, 1].
                scaled_tmp = (scaled_tmp - min(scaled_tmp.flatten()) ) / ( max(scaled_tmp.flatten()) - min(scaled_tmp.flatten()))
            else:
                scaled_tmp = np.zeros([self.ox,self.oy])
            input['tgt'][i] = scaled_tmp
        # Extra channel: a wider Gaussian on the last 'loc' column
        # (presumably the transformed image center — TODO confirm for the
        # untransformed branch, where no extra column is appended).
        tmp = self.utils.gaussian(np.array([self.iy,self.ix]),input['loc'][:,-1],4*self.gauss)
        scaled_tmp = sp.misc.imresize(tmp, [self.ox, self.oy])
        scaled_tmp = (scaled_tmp - min(scaled_tmp.flatten()) ) / ( max(scaled_tmp.flatten()) - min(scaled_tmp.flatten()))
        input['tgt'][self.nlandmark] = scaled_tmp
return input | datasets/transforms.py |
from __future__ import division
import math
import types
import torch
import utils
import random
import numbers
import numpy as np
import scipy as sp
from scipy import misc
from PIL import Image, ImageOps, ImageDraw
class Compose(object):
"""Composes several transforms together.
Args:
transforms (List[Transform]): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, input):
for t in self.transforms:
input = t(input)
return input
class ToTensor(object):
"""Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]."""
def __call__(self, input):
for key in input.keys():
value = input[key]
if isinstance(value, np.ndarray):
# handle numpy array
input[key] = torch.from_numpy(value)
else:
# handle PIL Image
tmp = torch.ByteTensor(torch.ByteStorage.from_buffer(value.tobytes()))
value = tmp.view(value.size[1], value.size[0], len(value.mode))
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
value = value.transpose(0, 1).transpose(0, 2).contiguous()
input[key] = value.float().div(255)
return input
class ToPILImage(object):
"""Converts a torch.*Tensor of range [0, 1] and shape C x H x W
or numpy ndarray of dtype=uint8, range[0, 255] and shape H x W x C
to a PIL.Image of range [0, 255]
"""
def __call__(self, input):
if isinstance(input['img'], np.ndarray):
# handle numpy array
input['img'] = Image.fromarray(input['img'])
else:
npimg = input['img'].mul(255).byte().numpy()
npimg = np.transpose(npimg, (1,2,0))
input['img'] = Image.fromarray(npimg)
return input
class Normalize(object):
"""Given mean: (R, G, B) and std: (R, G, B),
will normalize each channel of the torch.*Tensor, i.e.
channel = (channel - mean) / std
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, input):
# TODO: make efficient
for t, m, s in zip(input['img'], self.mean, self.std):
t.sub_(m).div_(s)
return input
class Scale(object):
"""Rescales the input PIL.Image to the given 'size'.
'size' will be the size of the smaller edge.
For example, if height > width, then image will be
rescaled to (size * height / width, size)
size: size of the smaller edge
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
def __call__(self, input):
w, h = input['img'].size
if (w <= h and w == self.size) or (h <= w and h == self.size):
return input
if w < h:
ow = self.size
oh = int(self.size * h / w)
input['img'] = input['img'].resize((ow, oh), self.interpolation)
return input
else:
oh = self.size
ow = int(self.size * w / h)
input['img'] = input['img'].resize((ow, oh), self.interpolation)
return input
class CenterCrop(object):
"""Crops the given PIL.Image at the center to have a region of
the given size. size can be a tuple (target_height, target_width)
or an integer, in which case the target will be of a square shape (size, size)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, input):
w, h = input['img'].size
th, tw = self.size
x1 = int(round((w - tw) / 2.))
y1 = int(round((h - th) / 2.))
input['img'] = input['img'].crop((x1, y1, x1 + tw, y1 + th))
return input
class Pad(object):
"""Pads the given PIL.Image on all sides with the given "pad" value"""
def __init__(self, padding, fill=0):
assert isinstance(padding, numbers.Number)
assert isinstance(fill, numbers.Number)
self.padding = padding
self.fill = fill
def __call__(self, input):
input['img'] = ImageOps.expand(input['img'], border=self.padding, fill=self.fill)
return input
class Lambda(object):
    """Applies a user-supplied lambda to the sample's image ('img' entry)."""
    def __init__(self, lambd):
        assert type(lambd) is types.LambdaType
        self.lambd = lambd
    def __call__(self, input):
        # BUG FIX: previously applied the lambda to the undefined name
        # `img` (NameError at runtime); the image lives in input['img'].
        input['img'] = self.lambd(input['img'])
        return input
class RandomCrop(object):
    """Crops the given PIL.Image at a random location to have a region of
    the given size. size can be a tuple (target_height, target_width)
    or an integer, in which case the target will be of a square shape (size, size)
    """
    def __init__(self, size, padding=0):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.padding = padding
    def __call__(self, input):
        if self.padding > 0:
            # BUG FIX: `img` was undefined here (NameError whenever
            # padding > 0); pad the sample's image instead.
            input['img'] = ImageOps.expand(input['img'], border=self.padding, fill=0)
        w, h = input['img'].size
        th, tw = self.size
        # Image already at target size: no crop needed.
        if w == tw and h == th:
            return input
        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        input['img'] = input['img'].crop((x1, y1, x1 + tw, y1 + th))
        return input
class RandomHorizontalFlip(object):
"""Randomly horizontally flips the given PIL.Image with a probability of 0.5
"""
def __call__(self, input):
if random.random() < 0.5:
input['img'] = input['img'].transpose(Image.FLIP_LEFT_RIGHT)
input['tgt'] = input['tgt'].transpose(Image.FLIP_LEFT_RIGHT)
input['loc'][0] = input['loc'][0] - math.ceil(input['img'].size[0]/2)
return input
class RandomSizedCrop(object):
"""Random crop the given PIL.Image to a random size of (0.08 to 1.0) of the original size
and and a random aspect ratio of 3/4 to 4/3 of the original aspect ratio
This is popularly used to train the Inception networks
size: size of the smaller edge
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
def __call__(self, input):
for attempt in range(10):
area = input['img'].size[0] * input['img'].size[1]
target_area = random.uniform(0.08, 1.0) * area
aspect_ratio = random.uniform(3. / 4, 4. / 3)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= input['img'].size[0] and h <= input['img'].size[1]:
x1 = random.randint(0, input['img'].size[0] - w)
y1 = random.randint(0, input['img'].size[1] - h)
input['img'] = input['img'].crop((x1, y1, x1 + w, y1 + h))
assert(input['img'].size == (w, h))
input['img'] = input['img'].resize((self.size, self.size), self.interpolation)
return input
# Fallback
scale = Scale(self.size, interpolation=self.interpolation)
crop = CenterCrop(self.size)
return crop(scale(input))
class NormalizeLandmarks(object):
""" max-min normalization of landmarks to range [-1,1]"""
def __init__(self,xsize,ysize):
self.xsize = xsize
self.ysize = ysize
def __call__(self, input):
valid_points = [v for v in input['loc'] if v[0] != 0 and v[1] != 0]
mean = np.mean(valid_points,axis = 0)
for i in range(input['loc'].shape[0]):
input['loc'][i][0] = -1 + (input['loc'][i][0] * 2. )/(inputx_res)
input['loc'][i][1] = -1 + (input['loc'][i][1] * 2. )/(inputy_res)
return input
class AffineCrop(object):
def __init__(self,nlandmark,ix,iy,ox,oy,rangle=0,rscale=0,rtrans=0,gauss=1):
self.rangle=rangle
self.rscale=rscale
self.rtrans=rtrans
self.nlandmark=nlandmark
self.ix = ix
self.iy = iy
self.ox = ox
self.oy = oy
self.utils = utils
self.gauss = gauss
def __call__(self, input):
angle = self.rangle*(2*torch.rand(1)[0] - 1)
grad_angle = angle * math.pi / 180
scale = 1+self.rscale*(2*torch.rand(1)[0] - 1)
transx = self.rtrans*(2*torch.rand(1)[0] - 1)
transy = self.rtrans*(2*torch.rand(1)[0] - 1)
img = input['img']
size = img.size
h, w = size[0], size[1]
centerX, centerY = int(w/2), int(h/2)
# perform rotation
img = img.rotate(angle, Image.BICUBIC)
# perform translation
img = img.transform(img.size, Image.AFFINE, (1, 0, transx, 0, 1, transy))
# perform scaling
img = img.resize((int(math.ceil(scale*h)) , int(math.ceil(scale*w))) , Image.ANTIALIAS)
w, h = img.size
x1 = int(round((w - self.ix) / 2.))
y1 = int(round((h - self.ix) / 2.))
input['img'] = img.crop((x1, y1, x1 + self.ix, y1 + self.iy))
if (np.sum(input['loc']) != 0):
occ = input['occ']
loc = input['loc']
newloc = np.ones((3,loc.shape[1]+1))
newloc[0:2,0:loc.shape[1]] = loc
newloc[0,loc.shape[1]] = centerY
newloc[1,loc.shape[1]] = centerX
trans_matrix = np.array([[1,0,-1*transx],[0,1,-1*transy],[0,0,1]])
scale_matrix = np.array([[scale,0,0],[0,scale,0],[0,0,1]])
angle_matrix = np.array([[math.cos(grad_angle),math.sin(grad_angle),0],[-math.sin(grad_angle),math.cos(grad_angle),0],[0,0,1]])
# perform rotation
newloc[0,:] = newloc[0,:] - centerY
newloc[1,:] = newloc[1,:] - centerX
newloc = np.dot(angle_matrix, newloc)
newloc[0,:] = newloc[0,:] + centerY
newloc[1,:] = newloc[1,:] + centerX
# perform translation
newloc = np.dot(trans_matrix, newloc)
# perform scaling
newloc = np.dot(scale_matrix, newloc)
newloc[0,:] = newloc[0,:] - y1
newloc[1,:] = newloc[1,:] - x1
input['loc'] = newloc[0:2,:]
for i in range(input['loc'].shape[1]):
if ~((input['loc'][0,i] == np.nan) & (input['loc'][1,i] == np.nan)):
if ((input['loc'][0,i] < 0) | (input['loc'][0,i] > self.iy) | (input['loc'][1,i] < 0) | (input['loc'][1,i] > self.ix)):
input['loc'][:,i] = np.nan
input['occ'][i] = 0
# generate heatmaps
input['tgt'] = np.zeros((self.nlandmark+1, self.ox, self.oy))
for i in range(self.nlandmark):
if (not np.isnan(input['loc'][:,i][0]) and not np.isnan(input['loc'][:,i][1])):
tmp = self.utils.gaussian(np.array([self.ix,self.iy]),input['loc'][:,i],self.gauss)
scaled_tmp = sp.misc.imresize(tmp, [self.ox, self.oy])
scaled_tmp = (scaled_tmp - min(scaled_tmp.flatten()) ) / ( max(scaled_tmp.flatten()) - min(scaled_tmp.flatten()))
else:
scaled_tmp = np.zeros([self.ox,self.oy])
input['tgt'][i] = scaled_tmp
tmp = self.utils.gaussian(np.array([self.iy,self.ix]),input['loc'][:,-1],4*self.gauss)
scaled_tmp = sp.misc.imresize(tmp, [self.ox, self.oy])
scaled_tmp = (scaled_tmp - min(scaled_tmp.flatten()) ) / ( max(scaled_tmp.flatten()) - min(scaled_tmp.flatten()))
input['tgt'][self.nlandmark] = scaled_tmp
return input | 0.543833 | 0.471832 |
import logging
from lxml import etree
try:
from PIL import Image
except ImportError:
import Image
import zipfile
import shutil
import os
from os.path import join
import tempfile
from namespaces import nsprefixes
from StringIO import StringIO
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'pptx_template') # installed
if not os.path.isdir(template_dir):
template_dir = join(os.path.dirname(__file__), 'pptx_template') # dev
def relationshiplist():
relationshiplist = [
['http://schemas.openxmlformats.org/officeDocument/2006/relationships/theme', 'theme/theme1.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/relationships/slideMaster', 'slideMasters/slideMaster1.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/relationships/slide', 'slides/slide1.xml'],
]
return relationshiplist
def contenttypes():
# FIXME - doesn't quite work...read from string as temp hack...
#types = makeelement('Types',nsprefix='ct')
types = etree.fromstring('''<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types"></Types>''')
parts = {
'/_rels/.rels': 'application/vnd.openxmlformats-package.relationships+xml',
'/ppt/_rels/presentation.xml.rels': 'application/vnd.openxmlformats-package.relationships+xml',
'/ppt/presentation.xml': 'application/vnd.openxmlformats-officedocument.presentationml.presentation.main+xml',
'/ppt/slides/_rels/slide1.xml.rels': 'application/vnd.openxmlformats-package.relationships+xml',
'/ppt/slides/slide1.xml': 'application/vnd.openxmlformats-officedocument.presentationml.slide+xml',
'/ppt/theme/theme1.xml': 'application/vnd.openxmlformats-officedocument.theme+xml',
'/ppt/slideMasters/slideMaster1.xml': 'application/vnd.openxmlformats-officedocument.presentationml.slideMaster+xml',
'/ppt/slideMasters/_rels/slideMaster1.xml.rels': 'application/vnd.openxmlformats-package.relationships+xml'
}
for i in range(1, 13):
path1 = '/ppt/slideLayouts/slideLayout' + str(i) + '.xml'
path2 = '/ppt/slideLayouts/_rels/slideLayout' + str(i) + '.xml.rels'
parts[path1] = 'application/vnd.openxmlformats-officedocument.presentationml.slideLayout+xml'
parts[path2] = 'application/vnd.openxmlformats-package.relationships+xml'
for part in parts:
types.append(makeelement('Override', nsprefix=None, attributes={'PartName': part, 'ContentType': parts[part]}))
# Add support for filetypes
filetypes = {'rels': 'application/vnd.openxmlformats-package.relationships+xml', 'xml': 'application/xml', 'jpeg': 'image/jpeg', 'gif': 'image/gif', 'png': 'image/png'}
for extension in filetypes:
types.append(makeelement('Default', nsprefix=None, attributes={'Extension': extension, 'ContentType': filetypes[extension]}))
return types
def pptrelationships(relationshiplist):
'''Generate a ppt relationships file'''
# Default list of relationships
# FIXME: using string hack instead of making element
#relationships = makeelement('Relationships',nsprefix='pr')
relationships = etree.fromstring(
'''<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">
</Relationships>'''
)
count = 0
for relationship in relationshiplist:
# Relationship IDs (rId) start at 1.
relationships.append(makeelement('Relationship', attributes={'Id': 'rId' + str(count + 1),
'Type': relationship[0], 'Target': relationship[1]}, nsprefix=None))
count += 1
return relationships
def makeelement(tagname, tagtext=None, nsprefix='p', attributes=None, attrnsprefix=None):
'''Create an element & return it'''
# Deal with list of nsprefix by making namespacemap
namespacemap = None
if isinstance(nsprefix, list):
namespacemap = {}
for prefix in nsprefix:
namespacemap[prefix] = nsprefixes[prefix]
nsprefix = nsprefix[0] # FIXME: rest of code below expects a single prefix
elif nsprefix:
namespacemap = {nsprefix: nsprefixes[nsprefix]}
else:
# For when namespace = None
nsprefix = 'p'
namespace = '{' + nsprefixes[nsprefix] + '}'
newelement = etree.Element(namespace + tagname, nsmap=namespacemap)
# Add attributes with namespaces
if attributes:
# If they haven't bothered setting attribute namespace, use an empty string
# (equivalent of no namespace)
if not attrnsprefix:
# Quick hack: it seems every element that has a 'w' nsprefix for its tag uses the same prefix for it's attributes
if nsprefix == 'w':
attributenamespace = namespace
else:
attributenamespace = ''
else:
attributenamespace = '{' + nsprefixes[attrnsprefix] + '}'
for tagattribute in attributes:
newelement.set(attributenamespace + tagattribute, attributes[tagattribute])
if tagtext:
newelement.text = tagtext
return newelement
def picture(picname, slide_rels, picdescription='No Description', pixelwidth=None,
pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
template=template_dir, align='center', scale=1):
'''Take a relationshiplist, picture file name, and return a paragraph containing the image and an updated relationshiplist'''
# http://openxmldeveloper.org/articles/462.aspx
# Create an image. Size may be specified, otherwise it will based on the
# pixel size of image. Return a paragraph containing the picture'''
# Copy the file into the media dir
media_dir = join(template, 'ppt', 'media')
if not os.path.isdir(media_dir):
os.mkdir(media_dir)
new_picname = join(media_dir, os.path.basename(picname))
shutil.copyfile(picname, new_picname)
picname = new_picname
# Check if the user has specified a size
if not pixelwidth or not pixelheight:
# If not, get info from the picture itself
pixelwidth, pixelheight = Image.open(picname).size[0:2]
picname = os.path.basename(picname)
# OpenXML measures on-screen objects in English Metric Units
# 1cm = 36000 EMUs
emuperpixel = 12667
width = str(int(pixelwidth * emuperpixel * scale))
height = str(int(pixelheight * emuperpixel * scale))
# Set relationship ID to the first available
picid = len(slide_rels) + 1
picrelid = 'rId' + str(picid)
slide_rels.append([nsprefixes['i'], '../media/' + picname, str(picid)])
# There are 3 main elements inside a picture
# 1. The Blipfill - specifies how the image fills the picture area (stretch, tile, etc.)
blipfill = makeelement('blipFill')
blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r', attributes={'embed': picrelid}))
stretch = makeelement('stretch', nsprefix='a')
stretch.append(makeelement('fillRect', nsprefix='a'))
blipfill.append(stretch)
# 2. The non visual picture properties
nvpicpr = makeelement('nvPicPr', nsprefix='p')
cnvpr = makeelement('cNvPr', nsprefix='p',
attributes={'id': '37', 'name': 'BLAH'})
nvpicpr.append(cnvpr)
cnvpicpr = makeelement('cNvPicPr')
nvpicpr.append(cnvpicpr)
nvpicpr.append(makeelement('nvPr'))
# 3. The Shape properties
sppr = makeelement('spPr')
xfrm = makeelement('xfrm', nsprefix='a')
xfrm.append(makeelement('off', nsprefix='a', attributes={'x': '1405440', 'y': '1820520'}))
xfrm.append(makeelement('ext', nsprefix='a', attributes={'cx': width, 'cy': height}))
prstgeom = makeelement('prstGeom', nsprefix='a', attributes={'prst': 'rect'})
prstgeom.append(makeelement('avLst', nsprefix='a'))
sppr.append(xfrm)
sppr.append(prstgeom)
# Add our 3 parts to the picture element
pic = makeelement('pic', nsprefix='p')
pic.append(nvpicpr)
pic.append(blipfill)
pic.append(sppr)
return slide_rels, pic
def savepptx(document, output, slides, media_files, pptrelationships,
contenttypes=contenttypes(), template=template_dir):
'''Save a modified document'''
assert os.path.isdir(template)
docxfile = zipfile.ZipFile(output, mode='w', compression=zipfile.ZIP_DEFLATED)
# Serialize our trees into out zip file
'''
treesandfiles = {document:'ppt/presentation.xml',
contenttypes:'[Content_Types].xml',
pptrelationships:'ppt/_rels/presentation.xml.rels'}
for tree in treesandfiles:
log.info('Saving: '+treesandfiles[tree] )
treestring = etree.tostring(tree, pretty_print=True)
docxfile.writestr(treesandfiles[tree],treestring)
'''
for slide in slides:
treestring = etree.tostring(slide.slide, pretty_print=True)
parser = etree.XMLParser(ns_clean=True)
tree = etree.parse(StringIO(treestring), parser)
treestring = etree.tostring(tree, pretty_print=True)
docxfile.writestr('ppt/slides/slide' + str(slide.number) + '.xml', treestring)
rels_tree = etree.fromstring('''<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships"></Relationships>''')
for rel in slide.relationships:
rel_el = (etree.Element('Relationship'))
rel_el.set('Id', 'rId' + rel[2])
rel_el.set('Type', rel[0])
rel_el.set('Target', rel[1])
rels_tree.append(rel_el)
rels_string = etree.tostring(rels_tree, pretty_print=True)
docxfile.writestr('ppt/slides/_rels/slide' + str(slide.number) + '.xml.rels',
rels_string)
# Add & compress support files
allowed = ['.xml', '.rels']
for dirpath, dirnames, filenames in os.walk(template):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext in allowed or filename in allowed or filename in media_files:
doc_file = os.path.join(dirpath, filename)
archivename = doc_file[len(template) + 1:]
docxfile.write(doc_file, archivename)
docxfile.close()
def slide():
sld = makeelement('sld', nsprefix=['p', 'r', 'a'])
csld = makeelement('cSld')
sptree = makeelement('spTree')
nvgrpsppr = makeelement('nvGrpSpPr')
cnvpr = makeelement('cNvPr', attributes={'id': '1', 'name': ''})
cnvgrpsppr = makeelement('cNvGrpSpPr')
nvpr = makeelement('nvPr')
nvgrpsppr.append(cnvpr)
nvgrpsppr.append(cnvgrpsppr)
nvgrpsppr.append(nvpr)
sptree.append(nvgrpsppr)
grpsppr = makeelement('grpSpPr')
xfrm = makeelement('xfrm', nsprefix='a')
xfrm.append(makeelement('off', attributes={'x': '0', 'y': '0'}, nsprefix='a'))
xfrm.append(makeelement('ext', attributes={'cx': '0', 'cy': '0'}, nsprefix='a'))
xfrm.append(makeelement('chOff', attributes={'x': '0', 'y': '0'}, nsprefix='a'))
xfrm.append(makeelement('chExt', attributes={'cx': '0', 'cy': '0'}, nsprefix='a'))
grpsppr.append(xfrm)
sptree.append(grpsppr)
csld.append(sptree)
sld.append(csld)
clrmapovr = makeelement('clrMapOvr')
clrmapovr.append(makeelement('masterClrMapping', nsprefix='a'))
sld.append(clrmapovr)
return sld
def text_box(text):
sp = makeelement('sp')
nvsppr = makeelement('nvSpPr')
nvsppr.append(makeelement('cNvPr', attributes={'id': '37', 'name': 'TextShape 1'}))
nvsppr.append(makeelement('cNvSpPr', attributes={'txBox': '1'}))
nvsppr.append(makeelement('nvPr'))
sp.append(nvsppr)
sppr = makeelement('spPr')
xfrm = makeelement('xfrm', nsprefix='a')
xfrm.append(makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '332656'}))
xfrm.append(makeelement('ext', nsprefix='a', attributes={'cx': '9144000', 'cy': '1262160'}))
sppr.append(xfrm)
prstgeom = makeelement('prstGeom', nsprefix='a', attributes={'prst': 'rect'})
prstgeom.append(makeelement('avLst', nsprefix='a'))
sppr.append(prstgeom)
sp.append(sppr)
txbody = makeelement('txBody')
txbody.append(makeelement('bodyPr', nsprefix='a', attributes={'anchor': 'ctr', 'bIns': '0', 'lIns': '0', 'rIns': '0', 'tIns': '0', 'wrap': 'none'}))
p = makeelement('p', nsprefix='a')
p.append(makeelement('pPr', nsprefix='a', attributes={'algn': 'ctr'}))
r = makeelement('r', nsprefix='a')
r.append(makeelement('rPr', nsprefix='a', attributes={'lang': 'en-GB'}))
r.append(makeelement('t', nsprefix='a', tagtext=text)) # this is where the text goes.
p.append(r)
p.append(makeelement('endParaRPr', nsprefix='a'))
txbody.append(p)
sp.append(txbody)
return sp
class Slide(object):
def __init__(self):
self.slide = slide()
self.relationships = [
[nsprefixes['sl'], '../slideLayouts/slideLayout2.xml', '1']
]
self.number = None
self.media_files = []
return
@classmethod
def create(cls, template_dir):
slide = cls()
slide.template_dir = template_dir
return slide
def add_picture(self, picname, *args, **kwargs):
extension = os.path.splitext(picname)[1]
if extension not in ['.jpg', '.jpeg', '.png']:
raise ValueError
self.relationships, pic = picture(picname, slide_rels=self.relationships,
template=self.template_dir, *args, **kwargs)
self.slide.xpath('/p:sld/p:cSld/p:spTree', namespaces=nsprefixes)[0].append(pic)
self.media_files.append(os.path.basename(picname))
def add_text_box(self, text):
self.slide.xpath('/p:sld/p:cSld/p:spTree', namespaces=nsprefixes)[0].append(
text_box(text))
class Document(object):
def __init__(self):
self.relationshiplist = relationshiplist()
self.slide_rels = [] # Each member of this list will be a list of relationships for a particular slide. Each relationship is itself a list, whose first member is the Type of the relationship (a namespace) and whose second member is the Target for the relationship.
self.tmpdir = tempfile.mkdtemp()
self.template_dir = os.path.join(self.tmpdir, 'template')
shutil.copytree(template_dir, self.template_dir) # we copy our template files to a temp location
return
@classmethod
def create(cls):
doc = cls()
doc.presentation = makeelement('presentation')
master_id_list = makeelement('sldMasterIdLst')
master_id_list.append(makeelement('sldMasterId', attributes={'id': '2147483648', '{' + nsprefixes['r'] + '}' + 'id': 'rId2'}))
doc.presentation.append(master_id_list)
doc.presentation.append(makeelement('sldIdLst'))
doc.presentation.append(makeelement('sldSz', attributes={'cx': '10080625',
'cy': '7559675'}))
doc.presentation.append(makeelement('notesSz', attributes={'cx': '7559675',
'cy': '10691812'}))
doc.slides = []
return doc
def add_slide(self):
slide = Slide.create(template_dir=self.template_dir)
slide.number = len(self.slides) + 1
self.slides.append(slide)
slide_list = self.presentation.xpath('/p:presentation/p:sldIdLst',
namespaces=nsprefixes)[0]
slide_list.append(makeelement('sldId',
attributes={'id': str(256 + len(self.slides) - 1),
'{' + nsprefixes['r'] + '}' + 'id': 'rId' + str(3 + len(self.slides) - 1)}))
return slide
def save(self, filename, *args, **kwargs):
media_files = []
for slide in self.slides:
media_files += slide.media_files
suffix = '.pptx'
if filename[-5:] != suffix: filename = filename + suffix
return savepptx(document=self.presentation, slides=self.slides,
media_files=media_files, template=self.template_dir,
output=filename,
pptrelationships=pptrelationships(self.relationshiplist),
*args, **kwargs)
def get_file_object(self, *args, **kwargs):
'''Get the document as a file-like object.'''
filedir = tempfile.mkdtemp()
filepath = os.path.join(filedir, 'rendered_pptx.pptx')
self.save(filename=filepath)
f = open(filepath)
shutil.rmtree(filedir)
return f
def get_as_string(self, *args, **kwargs):
return self.get_file_object(*args, **kwargs).read()
def close(self):
shutil.rmtree(self.tmpdir) | openxml/pptx.py | import logging
from lxml import etree
try:
from PIL import Image
except ImportError:
import Image
import zipfile
import shutil
import os
from os.path import join
import tempfile
from namespaces import nsprefixes
from StringIO import StringIO
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'pptx_template') # installed
if not os.path.isdir(template_dir):
template_dir = join(os.path.dirname(__file__), 'pptx_template') # dev
def relationshiplist():
relationshiplist = [
['http://schemas.openxmlformats.org/officeDocument/2006/relationships/theme', 'theme/theme1.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/relationships/slideMaster', 'slideMasters/slideMaster1.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/relationships/slide', 'slides/slide1.xml'],
]
return relationshiplist
def contenttypes():
# FIXME - doesn't quite work...read from string as temp hack...
#types = makeelement('Types',nsprefix='ct')
types = etree.fromstring('''<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types"></Types>''')
parts = {
'/_rels/.rels': 'application/vnd.openxmlformats-package.relationships+xml',
'/ppt/_rels/presentation.xml.rels': 'application/vnd.openxmlformats-package.relationships+xml',
'/ppt/presentation.xml': 'application/vnd.openxmlformats-officedocument.presentationml.presentation.main+xml',
'/ppt/slides/_rels/slide1.xml.rels': 'application/vnd.openxmlformats-package.relationships+xml',
'/ppt/slides/slide1.xml': 'application/vnd.openxmlformats-officedocument.presentationml.slide+xml',
'/ppt/theme/theme1.xml': 'application/vnd.openxmlformats-officedocument.theme+xml',
'/ppt/slideMasters/slideMaster1.xml': 'application/vnd.openxmlformats-officedocument.presentationml.slideMaster+xml',
'/ppt/slideMasters/_rels/slideMaster1.xml.rels': 'application/vnd.openxmlformats-package.relationships+xml'
}
for i in range(1, 13):
path1 = '/ppt/slideLayouts/slideLayout' + str(i) + '.xml'
path2 = '/ppt/slideLayouts/_rels/slideLayout' + str(i) + '.xml.rels'
parts[path1] = 'application/vnd.openxmlformats-officedocument.presentationml.slideLayout+xml'
parts[path2] = 'application/vnd.openxmlformats-package.relationships+xml'
for part in parts:
types.append(makeelement('Override', nsprefix=None, attributes={'PartName': part, 'ContentType': parts[part]}))
# Add support for filetypes
filetypes = {'rels': 'application/vnd.openxmlformats-package.relationships+xml', 'xml': 'application/xml', 'jpeg': 'image/jpeg', 'gif': 'image/gif', 'png': 'image/png'}
for extension in filetypes:
types.append(makeelement('Default', nsprefix=None, attributes={'Extension': extension, 'ContentType': filetypes[extension]}))
return types
def pptrelationships(relationshiplist):
'''Generate a ppt relationships file'''
# Default list of relationships
# FIXME: using string hack instead of making element
#relationships = makeelement('Relationships',nsprefix='pr')
relationships = etree.fromstring(
'''<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">
</Relationships>'''
)
count = 0
for relationship in relationshiplist:
# Relationship IDs (rId) start at 1.
relationships.append(makeelement('Relationship', attributes={'Id': 'rId' + str(count + 1),
'Type': relationship[0], 'Target': relationship[1]}, nsprefix=None))
count += 1
return relationships
def makeelement(tagname, tagtext=None, nsprefix='p', attributes=None, attrnsprefix=None):
'''Create an element & return it'''
# Deal with list of nsprefix by making namespacemap
namespacemap = None
if isinstance(nsprefix, list):
namespacemap = {}
for prefix in nsprefix:
namespacemap[prefix] = nsprefixes[prefix]
nsprefix = nsprefix[0] # FIXME: rest of code below expects a single prefix
elif nsprefix:
namespacemap = {nsprefix: nsprefixes[nsprefix]}
else:
# For when namespace = None
nsprefix = 'p'
namespace = '{' + nsprefixes[nsprefix] + '}'
newelement = etree.Element(namespace + tagname, nsmap=namespacemap)
# Add attributes with namespaces
if attributes:
# If they haven't bothered setting attribute namespace, use an empty string
# (equivalent of no namespace)
if not attrnsprefix:
# Quick hack: it seems every element that has a 'w' nsprefix for its tag uses the same prefix for it's attributes
if nsprefix == 'w':
attributenamespace = namespace
else:
attributenamespace = ''
else:
attributenamespace = '{' + nsprefixes[attrnsprefix] + '}'
for tagattribute in attributes:
newelement.set(attributenamespace + tagattribute, attributes[tagattribute])
if tagtext:
newelement.text = tagtext
return newelement
def picture(picname, slide_rels, picdescription='No Description', pixelwidth=None,
pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
template=template_dir, align='center', scale=1):
'''Take a relationshiplist, picture file name, and return a paragraph containing the image and an updated relationshiplist'''
# http://openxmldeveloper.org/articles/462.aspx
# Create an image. Size may be specified, otherwise it will based on the
# pixel size of image. Return a paragraph containing the picture'''
# Copy the file into the media dir
media_dir = join(template, 'ppt', 'media')
if not os.path.isdir(media_dir):
os.mkdir(media_dir)
new_picname = join(media_dir, os.path.basename(picname))
shutil.copyfile(picname, new_picname)
picname = new_picname
# Check if the user has specified a size
if not pixelwidth or not pixelheight:
# If not, get info from the picture itself
pixelwidth, pixelheight = Image.open(picname).size[0:2]
picname = os.path.basename(picname)
# OpenXML measures on-screen objects in English Metric Units
# 1cm = 36000 EMUs
emuperpixel = 12667
width = str(int(pixelwidth * emuperpixel * scale))
height = str(int(pixelheight * emuperpixel * scale))
# Set relationship ID to the first available
picid = len(slide_rels) + 1
picrelid = 'rId' + str(picid)
slide_rels.append([nsprefixes['i'], '../media/' + picname, str(picid)])
# There are 3 main elements inside a picture
# 1. The Blipfill - specifies how the image fills the picture area (stretch, tile, etc.)
blipfill = makeelement('blipFill')
blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r', attributes={'embed': picrelid}))
stretch = makeelement('stretch', nsprefix='a')
stretch.append(makeelement('fillRect', nsprefix='a'))
blipfill.append(stretch)
# 2. The non visual picture properties
nvpicpr = makeelement('nvPicPr', nsprefix='p')
cnvpr = makeelement('cNvPr', nsprefix='p',
attributes={'id': '37', 'name': 'BLAH'})
nvpicpr.append(cnvpr)
cnvpicpr = makeelement('cNvPicPr')
nvpicpr.append(cnvpicpr)
nvpicpr.append(makeelement('nvPr'))
# 3. The Shape properties
sppr = makeelement('spPr')
xfrm = makeelement('xfrm', nsprefix='a')
xfrm.append(makeelement('off', nsprefix='a', attributes={'x': '1405440', 'y': '1820520'}))
xfrm.append(makeelement('ext', nsprefix='a', attributes={'cx': width, 'cy': height}))
prstgeom = makeelement('prstGeom', nsprefix='a', attributes={'prst': 'rect'})
prstgeom.append(makeelement('avLst', nsprefix='a'))
sppr.append(xfrm)
sppr.append(prstgeom)
# Add our 3 parts to the picture element
pic = makeelement('pic', nsprefix='p')
pic.append(nvpicpr)
pic.append(blipfill)
pic.append(sppr)
return slide_rels, pic
def savepptx(document, output, slides, media_files, pptrelationships,
contenttypes=contenttypes(), template=template_dir):
'''Save a modified document'''
assert os.path.isdir(template)
docxfile = zipfile.ZipFile(output, mode='w', compression=zipfile.ZIP_DEFLATED)
# Serialize our trees into out zip file
'''
treesandfiles = {document:'ppt/presentation.xml',
contenttypes:'[Content_Types].xml',
pptrelationships:'ppt/_rels/presentation.xml.rels'}
for tree in treesandfiles:
log.info('Saving: '+treesandfiles[tree] )
treestring = etree.tostring(tree, pretty_print=True)
docxfile.writestr(treesandfiles[tree],treestring)
'''
for slide in slides:
treestring = etree.tostring(slide.slide, pretty_print=True)
parser = etree.XMLParser(ns_clean=True)
tree = etree.parse(StringIO(treestring), parser)
treestring = etree.tostring(tree, pretty_print=True)
docxfile.writestr('ppt/slides/slide' + str(slide.number) + '.xml', treestring)
rels_tree = etree.fromstring('''<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships"></Relationships>''')
for rel in slide.relationships:
rel_el = (etree.Element('Relationship'))
rel_el.set('Id', 'rId' + rel[2])
rel_el.set('Type', rel[0])
rel_el.set('Target', rel[1])
rels_tree.append(rel_el)
rels_string = etree.tostring(rels_tree, pretty_print=True)
docxfile.writestr('ppt/slides/_rels/slide' + str(slide.number) + '.xml.rels',
rels_string)
# Add & compress support files
allowed = ['.xml', '.rels']
for dirpath, dirnames, filenames in os.walk(template):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext in allowed or filename in allowed or filename in media_files:
doc_file = os.path.join(dirpath, filename)
archivename = doc_file[len(template) + 1:]
docxfile.write(doc_file, archivename)
docxfile.close()
def slide():
sld = makeelement('sld', nsprefix=['p', 'r', 'a'])
csld = makeelement('cSld')
sptree = makeelement('spTree')
nvgrpsppr = makeelement('nvGrpSpPr')
cnvpr = makeelement('cNvPr', attributes={'id': '1', 'name': ''})
cnvgrpsppr = makeelement('cNvGrpSpPr')
nvpr = makeelement('nvPr')
nvgrpsppr.append(cnvpr)
nvgrpsppr.append(cnvgrpsppr)
nvgrpsppr.append(nvpr)
sptree.append(nvgrpsppr)
grpsppr = makeelement('grpSpPr')
xfrm = makeelement('xfrm', nsprefix='a')
xfrm.append(makeelement('off', attributes={'x': '0', 'y': '0'}, nsprefix='a'))
xfrm.append(makeelement('ext', attributes={'cx': '0', 'cy': '0'}, nsprefix='a'))
xfrm.append(makeelement('chOff', attributes={'x': '0', 'y': '0'}, nsprefix='a'))
xfrm.append(makeelement('chExt', attributes={'cx': '0', 'cy': '0'}, nsprefix='a'))
grpsppr.append(xfrm)
sptree.append(grpsppr)
csld.append(sptree)
sld.append(csld)
clrmapovr = makeelement('clrMapOvr')
clrmapovr.append(makeelement('masterClrMapping', nsprefix='a'))
sld.append(clrmapovr)
return sld
def text_box(text):
sp = makeelement('sp')
nvsppr = makeelement('nvSpPr')
nvsppr.append(makeelement('cNvPr', attributes={'id': '37', 'name': 'TextShape 1'}))
nvsppr.append(makeelement('cNvSpPr', attributes={'txBox': '1'}))
nvsppr.append(makeelement('nvPr'))
sp.append(nvsppr)
sppr = makeelement('spPr')
xfrm = makeelement('xfrm', nsprefix='a')
xfrm.append(makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '332656'}))
xfrm.append(makeelement('ext', nsprefix='a', attributes={'cx': '9144000', 'cy': '1262160'}))
sppr.append(xfrm)
prstgeom = makeelement('prstGeom', nsprefix='a', attributes={'prst': 'rect'})
prstgeom.append(makeelement('avLst', nsprefix='a'))
sppr.append(prstgeom)
sp.append(sppr)
txbody = makeelement('txBody')
txbody.append(makeelement('bodyPr', nsprefix='a', attributes={'anchor': 'ctr', 'bIns': '0', 'lIns': '0', 'rIns': '0', 'tIns': '0', 'wrap': 'none'}))
p = makeelement('p', nsprefix='a')
p.append(makeelement('pPr', nsprefix='a', attributes={'algn': 'ctr'}))
r = makeelement('r', nsprefix='a')
r.append(makeelement('rPr', nsprefix='a', attributes={'lang': 'en-GB'}))
r.append(makeelement('t', nsprefix='a', tagtext=text)) # this is where the text goes.
p.append(r)
p.append(makeelement('endParaRPr', nsprefix='a'))
txbody.append(p)
sp.append(txbody)
return sp
class Slide(object):
def __init__(self):
self.slide = slide()
self.relationships = [
[nsprefixes['sl'], '../slideLayouts/slideLayout2.xml', '1']
]
self.number = None
self.media_files = []
return
@classmethod
def create(cls, template_dir):
slide = cls()
slide.template_dir = template_dir
return slide
def add_picture(self, picname, *args, **kwargs):
extension = os.path.splitext(picname)[1]
if extension not in ['.jpg', '.jpeg', '.png']:
raise ValueError
self.relationships, pic = picture(picname, slide_rels=self.relationships,
template=self.template_dir, *args, **kwargs)
self.slide.xpath('/p:sld/p:cSld/p:spTree', namespaces=nsprefixes)[0].append(pic)
self.media_files.append(os.path.basename(picname))
def add_text_box(self, text):
self.slide.xpath('/p:sld/p:cSld/p:spTree', namespaces=nsprefixes)[0].append(
text_box(text))
class Document(object):
    """A .pptx document under construction: presentation XML plus slides."""

    def __init__(self):
        self.relationshiplist = relationshiplist()
        # Per-slide relationship lists; each relationship is a list whose
        # first member is the Type (a namespace) and second the Target.
        self.slide_rels = []
        # Work on a private copy of the template so edits never touch the
        # shared template directory.
        self.tmpdir = tempfile.mkdtemp()
        self.template_dir = os.path.join(self.tmpdir, 'template')
        shutil.copytree(template_dir, self.template_dir)

    @classmethod
    def create(cls):
        """Build a Document with an empty presentation skeleton."""
        doc = cls()
        doc.presentation = makeelement('presentation')
        master_id_list = makeelement('sldMasterIdLst')
        master_id_list.append(makeelement('sldMasterId', attributes={'id': '2147483648', '{' + nsprefixes['r'] + '}' + 'id': 'rId2'}))
        doc.presentation.append(master_id_list)
        doc.presentation.append(makeelement('sldIdLst'))
        # Fixed slide/notes page sizes (EMU units).
        doc.presentation.append(makeelement('sldSz', attributes={'cx': '10080625',
                                                                 'cy': '7559675'}))
        doc.presentation.append(makeelement('notesSz', attributes={'cx': '7559675',
                                                                   'cy': '10691812'}))
        doc.slides = []
        return doc

    def add_slide(self):
        """Append a new empty slide, register it in sldIdLst and return it."""
        new_slide = Slide.create(template_dir=self.template_dir)
        new_slide.number = len(self.slides) + 1
        self.slides.append(new_slide)
        slide_list = self.presentation.xpath('/p:presentation/p:sldIdLst',
                                             namespaces=nsprefixes)[0]
        # Slide ids start at 256; relationship ids start at rId3 (rId1/rId2
        # are already taken by the presentation-level relationships).
        slide_list.append(makeelement('sldId',
                                      attributes={'id': str(256 + len(self.slides) - 1),
                                                  '{' + nsprefixes['r'] + '}' + 'id': 'rId' + str(3 + len(self.slides) - 1)}))
        return new_slide

    def save(self, filename, *args, **kwargs):
        """Write the presentation to *filename* (appends .pptx if missing)."""
        media_files = []
        for s in self.slides:
            media_files += s.media_files
        suffix = '.pptx'
        if not filename.endswith(suffix):
            filename = filename + suffix
        return savepptx(document=self.presentation, slides=self.slides,
                        media_files=media_files, template=self.template_dir,
                        output=filename,
                        pptrelationships=pptrelationships(self.relationshiplist),
                        *args, **kwargs)

    def get_file_object(self, *args, **kwargs):
        """Return the rendered document as an in-memory binary file-like object."""
        import io
        filedir = tempfile.mkdtemp()
        try:
            filepath = os.path.join(filedir, 'rendered_pptx.pptx')
            self.save(filename=filepath)
            # Bug fix: the original opened the .pptx (a zip archive) in text
            # mode and deleted its directory while the handle was still open,
            # which fails on Windows and raises UnicodeDecodeError on read.
            # Read the bytes eagerly and hand back a BytesIO instead.
            with open(filepath, 'rb') as f:
                data = f.read()
        finally:
            shutil.rmtree(filedir)
        return io.BytesIO(data)

    def get_as_string(self, *args, **kwargs):
        """Return the rendered document content as bytes."""
        return self.get_file_object(*args, **kwargs).read()

    def close(self):
        """Delete the temporary working directory."""
        shutil.rmtree(self.tmpdir)
import os
import warnings
import logging
import re
from typing import Dict, Union, List, Any, NoReturn
from aitool.datasets import PATH as DATA_PATH
from aitool import is_file_exist, load_lines, prepare_data, load_json
# Lazily-filled cache of known Chinese family names, loaded on first use.
chinese_family_name = set()
# (download URL, local destination) pair for the packed word-data bundle.
File_Bag = ['https://pcg-xyj-1258344701.cos.ap-guangzhou.myqcloud.com/aitool/words.zip', os.path.join(DATA_PATH, 'nlp', 'words')]
File_Chinese_Family_Name = os.path.join(DATA_PATH, 'nlp', 'words', 'Names', 'Chinese_family_name.txt')
def has_family_name(name: str) -> bool:
    """Return True if *name* starts with a known Chinese family name.

    On first call, loads the surname list from disk (downloading the word
    bundle first if the data file is missing).
    """
    if not chinese_family_name:
        if not is_file_exist(File_Chinese_Family_Name):
            prepare_data(*File_Bag, packed=True)
        for item in load_lines(File_Chinese_Family_Name):
            chinese_family_name.add(item)
    if len(name) > 0 and name[0] in chinese_family_name:
        return True
    return False
# Lazily-filled vocabulary of common words; see init_word_common().
word_common = set()
DIR_THUOCL = os.path.join(DATA_PATH, 'nlp', 'words', 'THUOCL')
FILE_XINHUA_CI = os.path.join(DATA_PATH, 'nlp', 'words', 'XINHUA', 'ci.json')
def init_word_common(threshold: int = 10) -> None:
    """Populate the module-level ``word_common`` vocabulary.

    Merges THUOCL words whose listed frequency exceeds *threshold* with all
    XINHUA dictionary entries; downloads the data bundle if it is missing.
    (Annotation fixed: the function returns None; it is not ``NoReturn``.)
    """
    if not is_file_exist(DIR_THUOCL) or not is_file_exist(FILE_XINHUA_CI):
        prepare_data(*File_Bag, packed=True)
    for filename in os.listdir(DIR_THUOCL):
        with open(os.path.join(DIR_THUOCL, filename), 'r') as fin:
            for line in fin:
                word, freq = line.strip().split('\t')
                # Bug/security fix: the frequency field was parsed with
                # eval(), which would execute arbitrary code embedded in the
                # data file; it is a plain integer, so use int().
                if int(freq) > threshold:
                    word_common.add(word)
    for entry in load_json(FILE_XINHUA_CI):
        word_common.add(entry['ci'])
def is_common_word(text: str) -> bool:
    """Return True if *text* is in the common-word vocabulary (lazy-loaded)."""
    if not word_common:
        init_word_common()
    return text in word_common
# Lazily-filled stopword set; see init_word_stop().
word_stop = set()
FILE_STOPWORDS = os.path.join(DATA_PATH, 'nlp', 'words', 'stopwords.txt')
def init_word_stop() -> None:
    """Populate the module-level ``word_stop`` set from the stopword file.

    (Annotation fixed: returns None, not ``NoReturn``.)
    """
    # Consistency fix: unlike the other init_* helpers, the original opened
    # the file without first fetching the word bundle, so a fresh install
    # crashed with FileNotFoundError here.
    if not is_file_exist(FILE_STOPWORDS):
        prepare_data(*File_Bag, packed=True)
    with open(FILE_STOPWORDS, 'r') as fin:
        for line in fin:
            word_stop.add(line.strip())
def is_stop_word(text: str) -> bool:
    """Return True if *text* is a stopword (vocabulary lazy-loaded)."""
    if not word_stop:
        init_word_stop()
    return text in word_stop
# Lazily-filled mapping: title length -> set of relationship/occupation titles.
relationship_title = {}
FILE_RELATIONSHIP = os.path.join(DATA_PATH, 'nlp', 'words', 'Names', 'relationship.txt')
def init_relationship_title() -> None:
    """Populate ``relationship_title`` from the data file plus hand additions.

    (Annotation fixed: returns None, not ``NoReturn``.)
    """
    if not is_file_exist(FILE_RELATIONSHIP):
        prepare_data(*File_Bag, packed=True)
    # Bug fix: the original literal carried many '' placeholder entries,
    # which collapse to one empty string and created a length-0 bucket; that
    # made is_relationship_title() treat EVERY single character as a title
    # (text[:-1] == '' matches). Empty strings are dropped below.
    relationship_title_addition = {'店长', '法师', '醫生', '大力士', '护士', '父亲', '天后', '教练', '保安', '计师', '管事',
                                   '知事', '道长', '妃', '母亲', '头目', '乞丐', '妻', '局长', '官员'}
    data = set(load_lines(FILE_RELATIONSHIP)) | relationship_title_addition
    for item in data:
        if not item:
            continue
        relationship_title.setdefault(len(item), set()).add(item)
def is_relationship_title(text: str) -> bool:
    """Return True if *text* is a known title plus exactly one extra
    leading or trailing character (e.g. surname + title)."""
    if not relationship_title:
        init_relationship_title()
    text_len = len(text)
    for title_len, titles in relationship_title.items():
        if text_len != title_len + 1:
            continue
        if text[1:] in titles or text[:-1] in titles:
            return True
    return False
def is_contains_english(text: str) -> bool:
    """Return True if *text* contains at least one ASCII letter (A-Z, a-z)."""
    return any('A' <= ch <= 'Z' or 'a' <= ch <= 'z' for ch in text)
# Default opening-bracket characters that terminate a name.
cut_until_char_delimiter = set('(([〔【')
def cut_until_char(text: str, delimiter: Union[tuple, str, list] = cut_until_char_delimiter) -> str:
    """Return the prefix of *text* before the first character in *delimiter*;
    the whole string if no delimiter occurs."""
    for idx, ch in enumerate(text):
        if ch in delimiter:
            return text[:idx]
    return text
# Default punctuation characters stripped from names.
delete_char_discard = set(')〕--”—.“《)')
def delete_char(text: str, discard: Union[tuple, str, list] = delete_char_discard):
    """Return *text* with every character in *discard* removed."""
    return ''.join(ch for ch in text if ch not in discard)
# Heuristic character sets for spotting nicknames.
nick_name_prefix = set('小大老阿男女')
nick_name_postfix_1 = set('哥姐总')
nick_name_postfix_2 = set('甲乙丙丁戊己庚辛壬癸一二三四五六七八九十')
def is_nick_name(text: str) -> bool:
    """Heuristically detect 2/3-character nicknames (e.g. 小X, X哥, 甲乙)."""
    length = len(text)
    if length == 2:
        first, second = text[0], text[1]
        if first in nick_name_prefix or second in nick_name_postfix_1:
            return True
        if first in nick_name_postfix_2 or second in nick_name_postfix_2:
            return True
    elif length == 3:
        if text[0] in nick_name_postfix_2 and text[1] in nick_name_postfix_2:
            return True
    return False
def is_contains_figure(text: str) -> bool:
    """Return True if *text* contains any digit character."""
    return any(ch.isdigit() for ch in text)
# Age-stage modifiers stripped from role names.
age_describes = ['童年', '老奶奶', '幼年', '老年', '少年', ]
def delete_age_describe(text: str) -> str:
    """Return *text* with every age-stage modifier removed."""
    for marker in age_describes:
        text = text.replace(marker, '')
    return text
def is_contains_chinese(strs) -> bool:
    """Return True if any character of *strs* is in the CJK unified block."""
    return any('\u4e00' <= ch <= '\u9fff' for ch in strs)
def is_all_chinese(text: str) -> bool:
    """Return True if every character is Chinese or an allowed separator
    (·‧:∙:); vacuously True for the empty string."""
    return all(is_contains_chinese(ch) or ch in '·‧:∙:' for ch in text)
# Hand-maintained blacklist of non-name strings. The original literal also
# contained many '' placeholders, which collapse to a single empty string.
black_name = {'未知', '收废品员', '开发商', '理发师', '小棉袄', '大高个', '地下党', ''}
def is_black_name(text: str) -> bool:
    """Deprecated membership test against the hand-maintained blacklist."""
    warnings.warn("is_black_name will be deprecated "
                  "because black_name should be disassemble to relationship_title", DeprecationWarning)
    return text in black_name
def clean_role(text: str) -> (str, int):
    """Normalize a role name and score how plausible it is as a name.

    Returns:
        (cleaned_text, score): score is -100 for unusable input; otherwise
        it starts at 10 and is adjusted by the heuristics below.
    """
    score = 10
    text = cut_until_char(text)
    text = delete_char(text)
    text = delete_age_describe(text)
    if not text:
        return '', -100
    # Bug fix: the original wrote `if not is_contains_chinese:`, testing the
    # function object itself (always truthy), so the branch never executed.
    if not is_contains_chinese(text):
        return '', -100
    if has_family_name(text):
        score -= 1
    if is_common_word(text):
        score -= 1
    if is_relationship_title(text):
        score -= 1
    if is_contains_english(text):
        score -= 1
    if is_nick_name(text):
        score -= 1
    if is_contains_figure(text):
        score -= 1
    if is_all_chinese(text):
        score += 1
    if is_black_name(text):
        score -= 1
    logging.info('{}, {}'.format(text, score))
    return text, score
def clean_alias(text: str) -> (str, int):
    """Score how plausible *text* is as an alias (no cleaning is applied).

    Returns:
        (text, score): score is -100 for empty input; otherwise starts at 13
        and is adjusted by the heuristics below.
    """
    score = 13
    if not text:
        return '', -100
    # Bug fix: `if not is_contains_chinese:` tested the function object
    # (always truthy), so the penalty was never applied; call it on *text*.
    if not is_contains_chinese(text):
        score -= 5
    if is_common_word(text):
        score -= 5
    if is_relationship_title(text):
        score -= 3
    if is_contains_english(text):
        score -= 1
    if is_nick_name(text):
        score -= 1
    if is_contains_figure(text):
        score -= 1
    if is_all_chinese(text):
        score += 1
    logging.info('{}, {}'.format(text, score))
    return text, score
import sys
from unicodedata import category
# All Unicode code points whose general category starts with "P"
# (punctuation), plus backtick and space. Built once at import time —
# this scans the full Unicode range, so it is intentionally module-level.
punctuation_chars = set([chr(i) for i in range(sys.maxunicode)
                         if category(chr(i)).startswith("P")])
punctuation_chars |= set(['`', ' '])
def is_punctuation(char):
    # Membership test against the precomputed punctuation set.
    return char in punctuation_chars
def select_nested_text(
        text: str,
        deep_add: tuple = ('《',),
        deep_reduce: tuple = ('》',)
) -> str:
    """Return the characters enclosed by *deep_add*/*deep_reduce* delimiters.

    Content nested deeper than one level is kept verbatim, including the
    inner delimiter characters themselves.
    """
    collected = []
    depth = 0
    for ch in text:
        if ch in deep_reduce and depth > 0:
            depth -= 1
        if depth != 0:
            collected.append(ch)
        if ch in deep_add:
            depth += 1
    return ''.join(collected)
def delete_nested_text(
        text: str,
        deep_add: tuple = ('(', '(', '[', '【',),
        deep_reduce: tuple = (')', ')', ']', '】',)
) -> str:
    """Remove bracketed segments, including the brackets, from *text*.

    Nesting is handled by tracking the bracket depth.
    """
    kept = []
    depth = 0
    for ch in text:
        if ch in deep_add:
            depth += 1
        if depth == 0:
            kept.append(ch)
        if ch in deep_reduce and depth > 0:
            depth -= 1
    return ''.join(kept)
# Decorations marking a sub-work rather than a core IP title:
# volume/episode counters, edition tags, trailing bare numbers.
pattern_1 = re.compile(r'(全|第)[1-90一二三四五六七八九十零〇]+(章|季|集|部分|部|卷)')
pattern_2 = re.compile(r'[::]*[(([]*(结局一|结局二|结局三|法国版|完全版|剧场版|电影版|剧场剪辑版|印度版|合体版+|合体版|重置版|合体剧场版|原版|正序版|精编版|粤语版|总集篇|电视剧版|电视剧|全集|动画版)[))\]]*')
pattern_3 = re.compile(r'[1-90一二三四五六七八九十零〇]+$')
def get_core_ip(ip: str) -> str:
    """Strip bracketed content and volume/edition decorations from *ip*."""
    core = delete_nested_text(ip)
    for pattern in (pattern_1, pattern_2, pattern_3):
        core = pattern.sub('', core)
    core = core.split(' ')[0]
    core = core.split('-')[0]
    return core
def is_sub_ip(ip: str) -> bool:
    """Return True if *ip* carries any volume/episode/edition decoration."""
    return any(pattern.search(ip) is not None
               for pattern in (pattern_1, pattern_2, pattern_3))
if __name__ == '__main__':
    # Ad-hoc smoke tests / usage demos for the helpers above. Several of
    # these trigger lazy data loading and may download the word bundle on
    # first run.
    print(has_family_name('项羽'))
    print(has_family_name('翼德'))
    print(is_contains_english('小bird'))
    print(is_contains_english('项羽'))
    print(cut_until_char('A(B'))
    print(cut_until_char('【AB'))
    print(is_common_word('汽车'))
    print(is_common_word('唐三'))
    print(is_relationship_title('张老师'))
    print(is_relationship_title('唐三'))
    print(clean_role('汽车'))
    print(clean_role('唐三'))
    print(clean_role('唐三(主角)'))
    print(get_core_ip('托马斯和他的朋友们第十九部分'))
    print(get_core_ip('托马斯和他的朋友们19'))
    print(get_core_ip('托马斯和他的朋友们结局一'))
    print(get_core_ip('托马斯和他的朋友们(结局一))'))
    print(get_core_ip('托马斯 和他的朋友们:(结局一)'))
    print(get_core_ip('斗罗大陆(全14卷)'))
    print(get_core_ip('一'))
    print(is_sub_ip('托马斯和他的朋友们第十九部分'))
    print(is_sub_ip('托马斯和他的朋友们19'))
    print(is_sub_ip('托马斯和他的朋友们结局一'))
    print(is_sub_ip('托马斯和他的朋友们(结局一))'))
    print(is_sub_ip('托马斯 和他的朋友们:(结局一)'))
    print(is_sub_ip('斗罗大陆(全14卷)'))
    print(is_sub_ip('一'))
    print(select_nested_text('《xxxx》'))
    print(is_stop_word('的'))
    print(is_stop_word('匿'))
import warnings
import logging
import re
from typing import Dict, Union, List, Any, NoReturn
from aitool.datasets import PATH as DATA_PATH
from aitool import is_file_exist, load_lines, prepare_data, load_json
chinese_family_name = set()
File_Bag = ['https://pcg-xyj-1258344701.cos.ap-guangzhou.myqcloud.com/aitool/words.zip', os.path.join(DATA_PATH, 'nlp', 'words')]
File_Chinese_Family_Name = os.path.join(DATA_PATH, 'nlp', 'words', 'Names', 'Chinese_family_name.txt')
def has_family_name(name: str) -> bool:
if not chinese_family_name:
if not is_file_exist(File_Chinese_Family_Name):
prepare_data(*File_Bag, packed=True)
for item in load_lines(File_Chinese_Family_Name):
chinese_family_name.add(item)
if len(name) > 0 and name[0] in chinese_family_name:
return True
return False
# Lazily-filled vocabulary of common words; see init_word_common().
word_common = set()
DIR_THUOCL = os.path.join(DATA_PATH, 'nlp', 'words', 'THUOCL')
FILE_XINHUA_CI = os.path.join(DATA_PATH, 'nlp', 'words', 'XINHUA', 'ci.json')
def init_word_common(threshold: int = 10) -> None:
    """Populate the module-level ``word_common`` vocabulary.

    Merges THUOCL words whose listed frequency exceeds *threshold* with all
    XINHUA dictionary entries; downloads the data bundle if it is missing.
    (Annotation fixed: the function returns None; it is not ``NoReturn``.)
    """
    if not is_file_exist(DIR_THUOCL) or not is_file_exist(FILE_XINHUA_CI):
        prepare_data(*File_Bag, packed=True)
    for filename in os.listdir(DIR_THUOCL):
        with open(os.path.join(DIR_THUOCL, filename), 'r') as fin:
            for line in fin:
                word, freq = line.strip().split('\t')
                # Bug/security fix: the frequency field was parsed with
                # eval(), which would execute arbitrary code embedded in the
                # data file; it is a plain integer, so use int().
                if int(freq) > threshold:
                    word_common.add(word)
    for entry in load_json(FILE_XINHUA_CI):
        word_common.add(entry['ci'])
def is_common_word(text: str) -> bool:
if not word_common:
init_word_common()
if text in word_common:
return True
return False
word_stop = set()
FILE_STOPWORDS = os.path.join(DATA_PATH, 'nlp', 'words', 'stopwords.txt')
def init_word_stop() -> NoReturn:
with open(FILE_STOPWORDS, 'r') as fin:
for line in fin:
word = line.strip()
word_stop.add(word)
def is_stop_word(text: str) -> bool:
if not word_stop:
init_word_stop()
if text in word_stop:
return True
return False
relationship_title = {}
FILE_RELATIONSHIP = os.path.join(DATA_PATH, 'nlp', 'words', 'Names', 'relationship.txt')
def init_relationship_title() -> NoReturn:
if not is_file_exist(FILE_RELATIONSHIP):
prepare_data(*File_Bag, packed=True)
relationship_title_addition = {'店长', '法师', '醫生', '大力士', '护士', '父亲', '天后', '教练', '保安', '计师', '管事',
'知事', '道长', '妃', '母亲', '头目', '乞丐', '妻', '局长', '官员', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', ''}
data = set(load_lines(FILE_RELATIONSHIP)) | relationship_title_addition
for item in data:
item_len = len(item)
if item_len not in relationship_title:
relationship_title[item_len] = set()
relationship_title[item_len].add(item)
def is_relationship_title(text: str) -> bool:
if not relationship_title:
init_relationship_title()
text_len = len(text)
for title_len, title_items in relationship_title.items():
if title_len + 1 != text_len:
continue
if text[:-1] in title_items or text[1:] in title_items:
return True
return False
def is_contains_english(text: str) -> bool:
for c in text:
_ord = ord(c)
if 65 <= _ord <= 90 or 97 <= _ord <= 122:
return True
return False
cut_until_char_delimiter = set('(([〔【')
def cut_until_char(text: str, delimiter: Union[tuple, str, list] = cut_until_char_delimiter) -> str:
for index, char in enumerate(text):
if char in delimiter:
return text[:index]
return text
delete_char_discard = set(')〕--”—.“《)')
def delete_char(text: str, discard: Union[tuple, str, list] = delete_char_discard):
new_text = ''
for char in text:
if char not in discard:
new_text += char
return new_text
nick_name_prefix = set('小大老阿男女')
nick_name_postfix_1 = set('哥姐总')
nick_name_postfix_2 = set('甲乙丙丁戊己庚辛壬癸一二三四五六七八九十')
def is_nick_name(text: str, ) -> bool:
if len(text) == 2:
if text[0] in nick_name_prefix or text[-1] in nick_name_postfix_1:
return True
if text[0] in nick_name_postfix_2 or text[1] in nick_name_postfix_2:
return True
if len(text) == 3:
if text[0] in nick_name_postfix_2 and text[1] in nick_name_postfix_2:
return True
return False
def is_contains_figure(text: str) -> bool:
for char in text:
if char.isdigit():
return True
return False
age_describes = ['童年', '老奶奶', '幼年', '老年', '少年', ]
def delete_age_describe(text: str) -> str:
for describe in age_describes:
text = text.replace(describe, '')
return text
def is_contains_chinese(strs) -> bool:
for _char in strs:
if '\u4e00' <= _char <= '\u9fff':
return True
return False
def is_all_chinese(text: str) -> bool:
for char in text:
if (not is_contains_chinese(char)) and (char not in '·‧:∙:'):
return False
return True
black_name = {'未知', '收废品员', '开发商', '理发师', '小棉袄', '大高个', '地下党', '', '', '', '', '', '', '', '', '', '',
'', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', ''}
def is_black_name(text: str) -> bool:
warnings.warn("is_black_name will be deprecated "
"because black_name should be disassemble to relationship_title", DeprecationWarning)
if text in black_name:
return True
return False
def clean_role(text: str) -> (str, int):
    """Normalize a role name and score how plausible it is as a name.

    Returns:
        (cleaned_text, score): score is -100 for unusable input; otherwise
        it starts at 10 and is adjusted by the heuristics below.
    """
    score = 10
    text = cut_until_char(text)
    text = delete_char(text)
    text = delete_age_describe(text)
    if not text:
        return '', -100
    # Bug fix: the original wrote `if not is_contains_chinese:`, testing the
    # function object itself (always truthy), so the branch never executed.
    if not is_contains_chinese(text):
        return '', -100
    if has_family_name(text):
        score -= 1
    if is_common_word(text):
        score -= 1
    if is_relationship_title(text):
        score -= 1
    if is_contains_english(text):
        score -= 1
    if is_nick_name(text):
        score -= 1
    if is_contains_figure(text):
        score -= 1
    if is_all_chinese(text):
        score += 1
    if is_black_name(text):
        score -= 1
    logging.info('{}, {}'.format(text, score))
    return text, score
def clean_alias(text: str) -> (str, int):
    """Score how plausible *text* is as an alias (no cleaning is applied).

    Returns:
        (text, score): score is -100 for empty input; otherwise starts at 13
        and is adjusted by the heuristics below.
    """
    score = 13
    if not text:
        return '', -100
    # Bug fix: `if not is_contains_chinese:` tested the function object
    # (always truthy), so the penalty was never applied; call it on *text*.
    if not is_contains_chinese(text):
        score -= 5
    if is_common_word(text):
        score -= 5
    if is_relationship_title(text):
        score -= 3
    if is_contains_english(text):
        score -= 1
    if is_nick_name(text):
        score -= 1
    if is_contains_figure(text):
        score -= 1
    if is_all_chinese(text):
        score += 1
    logging.info('{}, {}'.format(text, score))
    return text, score
import sys
from unicodedata import category
punctuation_chars = set([chr(i) for i in range(sys.maxunicode)
if category(chr(i)).startswith("P")])
punctuation_chars |= set(['`', ' '])
def is_punctuation(char):
return char in punctuation_chars
def select_nested_text(
text: str,
deep_add: tuple = ('《',),
deep_reduce: tuple = ('》',)
) -> str:
new_text = ''
deep = 0
for char in text:
if char in deep_reduce and deep > 0:
deep -= 1
if deep != 0:
new_text += char
if char in deep_add:
deep += 1
return new_text
def delete_nested_text(
text: str,
deep_add: tuple = ('(', '(', '[', '【',),
deep_reduce: tuple = (')', ')', ']', '】',)
) -> str:
# 删除以()、()修饰的嵌套成分
new_text = ''
deep = 0
for char in text:
if char in deep_add:
deep += 1
if deep == 0:
new_text += char
if char in deep_reduce and deep > 0:
deep -= 1
return new_text
pattern_1 = re.compile(r'(全|第)[1-90一二三四五六七八九十零〇]+(章|季|集|部分|部|卷)')
pattern_2 = re.compile(r'[::]*[(([]*(结局一|结局二|结局三|法国版|完全版|剧场版|电影版|剧场剪辑版|印度版|合体版+|合体版|重置版|合体剧场版|原版|正序版|精编版|粤语版|总集篇|电视剧版|电视剧|全集|动画版)[))\]]*')
pattern_3 = re.compile(r'[1-90一二三四五六七八九十零〇]+$')
def get_core_ip(ip: str) -> str:
ip = delete_nested_text(ip)
ip = re.sub(pattern_1, '', ip)
ip = re.sub(pattern_2, '', ip)
ip = re.sub(pattern_3, '', ip)
ip = ip.split(' ')[0]
ip = ip.split('-')[0]
return ip
def is_sub_ip(ip: str) -> bool:
if re.search(pattern_1, ip):
return True
if re.findall(pattern_2, ip):
return True
if re.findall(pattern_3, ip):
return True
return False
if __name__ == '__main__':
print(has_family_name('项羽'))
print(has_family_name('翼德'))
print(is_contains_english('小bird'))
print(is_contains_english('项羽'))
print(cut_until_char('A(B'))
print(cut_until_char('【AB'))
print(is_common_word('汽车'))
print(is_common_word('唐三'))
print(is_relationship_title('张老师'))
print(is_relationship_title('唐三'))
print(clean_role('汽车'))
print(clean_role('唐三'))
print(clean_role('唐三(主角)'))
print(get_core_ip('托马斯和他的朋友们第十九部分'))
print(get_core_ip('托马斯和他的朋友们19'))
print(get_core_ip('托马斯和他的朋友们结局一'))
print(get_core_ip('托马斯和他的朋友们(结局一))'))
print(get_core_ip('托马斯 和他的朋友们:(结局一)'))
print(get_core_ip('斗罗大陆(全14卷)'))
print(get_core_ip('一'))
print(is_sub_ip('托马斯和他的朋友们第十九部分'))
print(is_sub_ip('托马斯和他的朋友们19'))
print(is_sub_ip('托马斯和他的朋友们结局一'))
print(is_sub_ip('托马斯和他的朋友们(结局一))'))
print(is_sub_ip('托马斯 和他的朋友们:(结局一)'))
print(is_sub_ip('斗罗大陆(全14卷)'))
print(is_sub_ip('一'))
print(select_nested_text('《xxxx》'))
print(is_stop_word('的'))
print(is_stop_word('匿')) | 0.483648 | 0.10786 |
import logging
from copy import deepcopy
from typing import Optional
import numpy as np
import scipy.linalg as linalg
from ...misc import restack
from ...misc import unstack
from ...model.dynamical_system.boundary_value_problem import MultipleBoundaryValueProblem
from ...model.dynamical_system.flow_problem import LinearFlow
from ...model.dynamical_system.initial_value_problem import InitialValueProblem
from ...model.optimization.optimal_control import LQOptimalControl
from ...model.variables.time_function import Time
from ...solver_container import solver_container_factory
from ..base_solver import BaseSolver
from ..base_solver import TimeSolution
logger = logging.getLogger(__name__)
class MultipleShooting(BaseSolver):
    """Solver for solving MultipleBoundaryValueProblems with a multiple shooting approach.

    The BVP is discretized at the shooting nodes; homogeneous flows between
    consecutive nodes are computed once, and a Newton iteration solves for
    the projected node states.
    """

    def __init__(self,
                 bvp: MultipleBoundaryValueProblem,
                 flow_problem: LinearFlow,
                 ivp_problem: InitialValueProblem,
                 shooting_nodes,
                 stepsize=1e-1,
                 *args,
                 **kwargs):
        if not isinstance(bvp, MultipleBoundaryValueProblem):
            raise TypeError(bvp)
        if not isinstance(flow_problem, LinearFlow):
            raise TypeError(flow_problem)
        if not isinstance(ivp_problem, InitialValueProblem):
            raise TypeError(ivp_problem)
        self._bvp = bvp
        self._shooting_nodes = shooting_nodes
        self._n_shooting_nodes = len(shooting_nodes)
        self._boundary_values = bvp.boundary_values.boundary_values
        self._bvp_nodes = bvp.nodes
        self._check_shooting_nodes()
        # Bug fix: this was left as a zip object, which is exhausted after a
        # single pass; materialize it so _get_mesh can be called repeatedly.
        self._intervals = list(zip(self._shooting_nodes,
                                   self._shooting_nodes[1:]))
        self._final_time = self._shooting_nodes[-1]
        self._inner_nodes = shooting_nodes[1:-1]
        self._dynamical_system = bvp.dynamical_system
        self._dynamical_system.reset()
        self._variables = self._dynamical_system.variables
        self._nn = self._dynamical_system.nn
        self._flow_problem = flow_problem
        self._ivp_problem = ivp_problem
        self._stepsize = stepsize
        super().__init__(*args, **kwargs)

    def _init_solver(self,
                     time_interval: Optional[Time],
                     flow_abs_tol: Optional[float] = None,
                     flow_rel_tol: Optional[float] = None) -> None:
        """Precompute projections, flows and shooting values for one run."""
        self._set_t2s()
        self._set_d_as()
        self._init_flow_solver()
        self._flows = self._get_homogeneous_flows(flow_abs_tol, flow_rel_tol)
        self._set_gis()
        self._shooting_values = self._get_shooting_values()
        if time_interval is None:
            time_interval = self._variables.time
        self._time_interval = time_interval
        if time_interval.grid is None:
            time_interval.grid = self._shooting_nodes
        self._solution_time_grid = time_interval.grid

    def _init_flow_solver(self) -> None:
        """Create the default flow solver on the shooting-node grid."""
        time_interval = deepcopy(self._bvp.time_interval)
        time_interval.grid = self._shooting_nodes
        stepsize = 1. / (self._n_shooting_nodes - 1)
        logger.debug("Creating flow solver with time_interval: {}".format(
            time_interval.grid))
        self._flow_solver = solver_container_factory.get_solver_container(
            self._flow_problem).default_solver.solver(self._flow_problem,
                                                      time_interval,
                                                      stepsize,
                                                      rel_tol=self.rel_tol,
                                                      abs_tol=self.abs_tol)

    def _get_homogeneous_flows(self,
                               abs_tol: Optional[float] = None,
                               rel_tol: Optional[float] = None) -> np.ndarray:
        """Compute the homogeneous flow matrices, optionally overriding tolerances."""
        flow_solver = self._flow_solver
        if abs_tol is not None:
            flow_solver.abs_tol = abs_tol
        if rel_tol is not None:
            flow_solver.rel_tol = rel_tol
        return flow_solver.get_homogeneous_flows()

    def _get_shooting_values(self) -> np.ndarray:
        """Project the boundary-condition matrices onto the shooting nodes.

        Also records which shooting nodes carry boundary conditions in
        ``self._boundary_indices``.
        """
        n = self._nn
        rank = self._dynamical_system.rank
        shooting_values = np.zeros((n, n, self._n_shooting_nodes), order='F')
        j = 0
        indices = np.array([], dtype=int)
        for i, node in enumerate(self._shooting_nodes):
            if node in self._bvp_nodes:
                shooting_values[..., i] = self._boundary_values[..., j]
                indices = np.append(indices, [i])
                j += 1
        z_gamma = self._bvp.boundary_values.z_gamma
        projected_values = np.einsum('ai, ajr,jkr->ikr', z_gamma,
                                     shooting_values, self._t2s)
        self._boundary_indices = indices
        return projected_values.reshape(rank,
                                        rank * self._n_shooting_nodes,
                                        order='F')

    def _check_shooting_nodes(self) -> None:
        """Raise ValueError unless every BVP node is also a shooting node."""
        for node in self._bvp_nodes:
            if node not in self._shooting_nodes:
                raise ValueError(
                    "Shooting nodes {}\nhave to include boundary value nodes {}"
                    .format(self._shooting_nodes, self._bvp_nodes))

    @property
    def bvp(self) -> MultipleBoundaryValueProblem:
        """The boundary value problem being solved."""
        return self._bvp

    def _get_shooting_matrix(self) -> np.ndarray:
        """Assemble the (dense) Newton system matrix of the shooting method."""
        gis = self._gis
        jis = self._compute_jis()
        dim = self._dynamical_system.rank * self._n_shooting_nodes
        shooting_matrix = np.zeros((dim, dim), order='F')
        # TODO: Inefficient, probably needs low level implementation
        diag = linalg.block_diag(*(gis[..., i] for i in range(gis.shape[-1])))
        shooting_matrix[:diag.shape[0], :diag.shape[1]] = -diag
        for i in range(self._n_shooting_nodes - 1):
            size = self._dynamical_system.rank * i
            sizep1 = self._dynamical_system.rank * (i + 1)
            sizep2 = self._dynamical_system.rank * (i + 2)
            shooting_matrix[size:sizep1, sizep1:sizep2] = jis[..., i]
        # Boundary-condition rows occupy the last block row.
        shooting_matrix[sizep1:, :] = self._shooting_values
        return shooting_matrix

    def _get_mesh(self, stepsize: float) -> np.ndarray:
        """Return the union of uniform meshes over all shooting intervals."""
        # Bug fix: np.concatenate needs a sequence of arrays, not a
        # generator expression.
        pieces = [np.arange(t_lower, t_upper, stepsize)
                  for t_lower, t_upper in self._intervals]
        return np.concatenate(pieces)

    def _set_gis(self) -> None:
        """Project the flows between consecutive nodes: G_i = T2_{i+1}^T F_i T2_i."""
        t2 = self._t2s
        t2_1 = t2[:, :, 1:]
        t2_e = t2[:, :, :-1]
        gis = np.einsum('jir,jkr,klr->ilr', t2_1, self._flows, t2_e)
        self._gis: np.ndarray = gis

    def _compute_jis(self) -> np.ndarray:
        # TODO: only works in the linear case
        rank = self._dynamical_system.rank
        ntemp = self._n_shooting_nodes - 1
        return np.array(ntemp * (np.identity(rank), )).T

    def _set_t2s(self) -> None:
        """Cache the differential projector basis T2 at every shooting node."""
        rank = self._dynamical_system.rank
        t2s = np.zeros((self._nn, rank, self._n_shooting_nodes), order='F')
        for i, node in enumerate(self._shooting_nodes):
            t2s[:, :, i] = self._dynamical_system.t2(node)
        self._t2s = t2s

    def _newton_step(self, current_node_states: np.ndarray,
                     rhs: np.ndarray) -> np.ndarray:
        """Perform one Newton update of the projected node states."""
        shooting_matrix = self._get_shooting_matrix()
        lin_sys_sol = np.linalg.solve(shooting_matrix, rhs)
        next_nodes = current_node_states - restack(lin_sys_sol,
                                                   current_node_states.shape)
        return next_nodes

    def _get_newton_rhs(self, current_node_states: np.ndarray) -> np.ndarray:
        """Stack matching residuals and boundary residuals into one vector."""
        current_x_d = self._get_x_d(current_node_states)
        boundary_node_states = current_x_d[..., self._boundary_indices]
        bound_res = self._bvp.boundary_values.residual(boundary_node_states)
        computed_node_states = self._forward_projected_nodes(
            current_node_states)
        diffs = current_node_states[..., 1:] - computed_node_states[..., :-1]
        res_b = unstack(diffs)
        return np.concatenate((res_b, bound_res), axis=0)

    def _forward_projected_nodes(self, node_values: np.ndarray) -> np.ndarray:
        """Propagate projected node values one interval forward and re-project."""
        solver = self._flow_solver
        lift_up = self._get_x_d(node_values)
        sol = solver.forward_solve_differential(lift_up)
        return self._project_values(sol)

    def _get_initial_guess(self, initial_guess: Optional[np.ndarray] = None
                           ) -> np.ndarray:
        """Return a zero guess of the correct shape, or validate the given one."""
        shape = self._dynamical_system.variables.shape + (
            self._n_shooting_nodes, )
        if initial_guess is None:
            initial_guess = np.zeros(shape, order='F')
        elif initial_guess.shape != shape:
            raise ValueError(
                "initial_guess.shape is {} but should equal {}".format(
                    initial_guess.shape, shape))
        return initial_guess

    def _project_values(self, values: np.ndarray) -> np.ndarray:
        """Project full states onto the differential components (T2^T x)."""
        return np.einsum('ijr,i...r->j...r', self._t2s, values)

    def _run(self,
             time_interval=None,
             initial_guess=None,
             dynamic_update=False,
             *args,
             **kwargs) -> TimeSolution:
        """Run the shooting iteration and return the solution on the time grid."""
        logger.info('''MultipleShooting solver initialized with\n
        shooting_nodes: {}\n
        boundary_nodes: {}\n'''.format(self._shooting_nodes, self._bvp_nodes))
        self._init_solver(time_interval, *args, **kwargs)
        initial_guess = self._get_initial_guess(initial_guess)
        projected_values = self._project_values(initial_guess)
        for i in range(self.max_iter):
            residual = self._get_newton_rhs(projected_values)
            if self.abort(residual):
                break
            projected_values = self._newton_step(projected_values, residual)
        # Lift the projected solution back to full coordinates:
        # x = x_d - D_A x_d - f_A.
        x_d = self._get_x_d(projected_values)
        full_node_values = x_d - np.einsum('ijr,j...r->i...r', self._das,
                                           x_d) - self._fas
        node_solution = TimeSolution(self._shooting_nodes, full_node_values)
        if dynamic_update:
            time_grid_solution = deepcopy(node_solution)
            time_grid_solution.dynamic_update = self._get_intermediate_values
        else:
            time_grid_solution = self._get_intermediate_values(
                node_solution, self._solution_time_grid)
        self._bvp.variables.current_values = time_grid_solution
        return time_grid_solution

    def _get_intermediate_values(self, node_solution: np.ndarray,
                                 time_grid: np.ndarray) -> TimeSolution:
        """Interpolate between shooting nodes by solving IVPs on sub-grids."""
        # this is rather slow, but it's an inherent disadvantage of the shooting approach
        logger.debug(
            "Getting intermediate values on time grid of size: {}".format(
                time_grid.size))
        idsize = time_grid.size
        idx = np.append(
            np.searchsorted(time_grid, node_solution.time_grid, 'left'),
            idsize)
        solution = np.zeros(
            (*node_solution.solution.shape[:-1], time_grid.size))
        f_columns = self._bvp.boundary_values.n_inhom
        self._ivp_problem.init_solver(stepsize=self._stepsize,
                                      f_columns=f_columns,
                                      abs_tol=self.abs_tol,
                                      # Bug fix: rel_tol was passed
                                      # self.abs_tol (copy-paste error).
                                      rel_tol=self.rel_tol)
        # TODO: Also compute backwards for better stability / difference to node points
        for i, node in enumerate(node_solution.time_grid):
            loweridx = idx[i]
            upperidx = idx[i + 1]
            grid = time_grid[loweridx:upperidx]
            t0 = node
            if grid.size == 0:
                continue
            x0 = node_solution(node)
            tf = grid[-1]
            interval = Time(t0, tf, grid)
            x0_times = self._ivp_problem.solve(interval, x0)
            # Skip the initial point when it is not itself a grid point.
            if node in grid or x0_times.solution.shape[-1] == 1:
                idx_increment = 0
            else:
                idx_increment = 1
            logger.debug("x0_timesshape: {}".format(x0_times.solution.shape))
            solution[..., loweridx:upperidx] = np.atleast_2d(
                x0_times.solution.T).T[..., idx_increment:]
        return TimeSolution(time_grid, solution)

    def _get_x_d(self, projected_values: np.ndarray) -> np.ndarray:
        """Lift projected values back to full coordinates: x_d = T2 p."""
        return np.einsum('ijr,j...r->i...r', self._t2s, projected_values)

    def _set_d_as(self) -> None:
        """Cache the algebraic coupling D_A and inhomogeneity f_A at each node."""
        das = np.zeros((self._nn, self._nn, self._n_shooting_nodes), order='F')
        shape = self._dynamical_system.variables.shape
        fas = np.zeros((*shape, self._n_shooting_nodes), order='F')
        self._dynamical_system.init_rank()
        for i, node in enumerate(self._shooting_nodes):
            das[:, :, i] = self._dynamical_system.d_a(node)
            fas[..., i] = self._dynamical_system.f_a(node)
        self._das = das
        self._fas = fas
# Register MultipleShooting as the default solver for multiple-point BVPs
# and, via the BVP reformulation (LQOptimalControl.get_bvp), for
# linear-quadratic optimal control problems.
solver_container_factory.register_solver(MultipleBoundaryValueProblem,
                                         MultipleShooting,
                                         default=True)
solver_container_factory.register_solver(
    LQOptimalControl,
    MultipleShooting,
    default=True,
    creator_function=LQOptimalControl.get_bvp)
from copy import deepcopy
from typing import Optional
import numpy as np
import scipy.linalg as linalg
from ...misc import restack
from ...misc import unstack
from ...model.dynamical_system.boundary_value_problem import MultipleBoundaryValueProblem
from ...model.dynamical_system.flow_problem import LinearFlow
from ...model.dynamical_system.initial_value_problem import InitialValueProblem
from ...model.optimization.optimal_control import LQOptimalControl
from ...model.variables.time_function import Time
from ...solver_container import solver_container_factory
from ..base_solver import BaseSolver
from ..base_solver import TimeSolution
logger = logging.getLogger(__name__)
class MultipleShooting(BaseSolver):
"""Solver for solving MultipleBoundaryValueProblems with a multiple shooting approach."""
def __init__(self,
bvp: MultipleBoundaryValueProblem,
flow_problem: LinearFlow,
ivp_problem: InitialValueProblem,
shooting_nodes,
stepsize=1e-1,
*args,
**kwargs):
if not isinstance(bvp, MultipleBoundaryValueProblem):
raise TypeError(bvp)
if not isinstance(flow_problem, LinearFlow):
raise TypeError(flow_problem)
if not isinstance(ivp_problem, InitialValueProblem):
raise TypeError(ivp_problem)
self._bvp = bvp
self._shooting_nodes = shooting_nodes
self._n_shooting_nodes = len(shooting_nodes)
self._boundary_values = bvp.boundary_values.boundary_values
self._bvp_nodes = bvp.nodes
self._check_shooting_nodes()
self._intervals = zip(self._shooting_nodes, self._shooting_nodes[1:])
self._final_time = self._shooting_nodes[-1]
self._inner_nodes = shooting_nodes[1:-1]
self._dynamical_system = bvp.dynamical_system
self._dynamical_system.reset()
self._variables = self._dynamical_system.variables
self._nn = self._dynamical_system.nn
self._flow_problem = flow_problem
self._ivp_problem = ivp_problem
self._stepsize = stepsize
super().__init__(*args, **kwargs)
def _init_solver(self,
time_interval: Optional[Time],
flow_abs_tol: Optional[float] = None,
flow_rel_tol: Optional[float] = None) -> None:
self._set_t2s()
self._set_d_as()
self._init_flow_solver()
self._flows = self._get_homogeneous_flows(flow_abs_tol, flow_rel_tol)
self._set_gis()
self._shooting_values = self._get_shooting_values()
if time_interval is None:
time_interval = self._variables.time
self._time_interval = time_interval
if time_interval.grid is None:
time_interval.grid = self._shooting_nodes
self._solution_time_grid = time_interval.grid
def _init_flow_solver(self) -> None:
time_interval = deepcopy(self._bvp.time_interval)
time_interval.grid = self._shooting_nodes
stepsize = 1. / (self._n_shooting_nodes - 1)
logger.debug("Creating flow solver with time_interval: {}".format(
time_interval.grid))
self._flow_solver = solver_container_factory.get_solver_container(
self._flow_problem).default_solver.solver(self._flow_problem,
time_interval,
stepsize,
rel_tol=self.rel_tol,
abs_tol=self.abs_tol)
def _get_homogeneous_flows(self,
abs_tol: Optional[float] = None,
rel_tol: Optional[float] = None) -> np.ndarray:
flow_solver = self._flow_solver
if abs_tol is not None:
flow_solver.abs_tol = abs_tol
if rel_tol is not None:
flow_solver.rel_tol = rel_tol
return flow_solver.get_homogeneous_flows()
def _get_shooting_values(self) -> np.ndarray:
    """Assemble the projected boundary-condition blocks of the shooting system.

    The boundary-value matrices are scattered into an (nn, nn, n_nodes)
    array at the shooting nodes that coincide with boundary nodes, then
    projected with z_gamma and the t2 bases, and finally flattened into a
    (rank, rank * n_nodes) block row.  As a side effect, records which
    shooting-node indices carry boundary conditions in
    self._boundary_indices.
    """
    n = self._nn
    rank = self._dynamical_system.rank
    shooting_values = np.zeros((n, n, self._n_shooting_nodes), order='F')
    j = 0
    indices = np.array([], dtype=int)
    for i, node in enumerate(self._shooting_nodes):
        if node in self._bvp_nodes:
            # The i-th shooting node is a boundary node: place the j-th
            # boundary-value matrix there and remember the index.
            shooting_values[..., i] = self._boundary_values[..., j]
            indices = np.append(indices, [i])
            j += 1
    z_gamma = self._bvp.boundary_values.z_gamma
    projected_values = np.einsum('ai, ajr,jkr->ikr', z_gamma,
                                 shooting_values, self._t2s)
    self._boundary_indices = indices
    return projected_values.reshape(rank,
                                    rank * self._n_shooting_nodes,
                                    order='F')
def _check_shooting_nodes(self) -> None:
for node in self._bvp_nodes:
if node not in self._shooting_nodes:
raise ValueError(
"Shooting nodes {}\nhave to include boundary value nodes {}"
.format(self._shooting_nodes, self._bvp_nodes))
@property
def bvp(self) -> MultipleBoundaryValueProblem:
    """The multiple-point boundary value problem this solver was built for."""
    return self._bvp
def _get_shooting_matrix(self) -> np.ndarray:
gis = self._gis
jis = self._compute_jis()
dim = self._dynamical_system.rank * self._n_shooting_nodes
shooting_matrix = np.zeros((dim, dim), order='F')
# TODO: Inefficient, probably needs low level implementation
diag = linalg.block_diag(*(gis[..., i] for i in range(gis.shape[-1])))
shooting_matrix[:diag.shape[0], :diag.shape[1]] = -diag
for i in range(self._n_shooting_nodes)[:-1]:
size = self._dynamical_system.rank * i
sizep1 = self._dynamical_system.rank * (i + 1)
sizep2 = self._dynamical_system.rank * (i + 2)
shooting_matrix[size:sizep1, sizep1:sizep2] = jis[..., i]
shooting_matrix[sizep1:, :] = self._shooting_values
return shooting_matrix
def _get_mesh(self, stepsize: float) -> np.ndarray:
mesh = np.concatenate((np.arange(t_lower, t_upper, stepsize)
for t_lower, t_upper in self._intervals))
return mesh
def _set_gis(self) -> None:
t2 = self._t2s
t2_1 = t2[:, :, 1:]
t2_e = t2[:, :, :-1]
gis = np.einsum('jir,jkr,klr->ilr', t2_1, self._flows, t2_e)
self._gis: np.ndarray = gis
def _compute_jis(self) -> np.ndarray:
# TODO: only works in the linear case
rank = self._dynamical_system.rank
ntemp = self._n_shooting_nodes - 1
return np.array(ntemp * (np.identity(rank), )).T
def _set_t2s(self) -> None:
rank = self._dynamical_system.rank
t2s = np.zeros((self._nn, rank, self._n_shooting_nodes), order='F')
for i, node in enumerate(self._shooting_nodes):
t2s[:, :, i] = self._dynamical_system.t2(node)
self._t2s = t2s
def _newton_step(self, current_node_states: np.ndarray,
                 rhs: np.ndarray) -> np.ndarray:
    """Perform one Newton update of the projected node states.

    Solves the dense shooting system for the correction and subtracts it
    from the current iterate.

    NOTE(review): restack presumably reshapes the flat linear-system
    solution back to current_node_states.shape -- confirm against the
    helper's definition.
    """
    shooting_matrix = self._get_shooting_matrix()
    lin_sys_sol = np.linalg.solve(shooting_matrix, rhs)
    next_nodes = current_node_states - restack(lin_sys_sol,
                                               current_node_states.shape)
    return next_nodes
def _get_newton_rhs(self, current_node_states: np.ndarray) -> np.ndarray:
    """Assemble the Newton residual: continuity defects plus boundary residual.

    The continuity part is the mismatch between each node state and the
    state obtained by integrating forward from the previous node; the
    boundary part is the residual of the boundary conditions evaluated at
    the boundary nodes.
    """
    current_x_d = self._get_x_d(current_node_states)
    boundary_node_states = current_x_d[..., self._boundary_indices]
    bound_res = self._bvp.boundary_values.residual(boundary_node_states)
    computed_node_states = self._forward_projected_nodes(
        current_node_states)
    # Continuity defects between consecutive shooting nodes.
    diffs = current_node_states[..., 1:] - computed_node_states[..., :-1]
    # NOTE(review): unstack presumably flattens the node axis into a
    # vector compatible with the shooting matrix -- confirm.
    res_b = unstack(diffs)
    return np.concatenate((res_b, bound_res), axis=0)
def _forward_projected_nodes(self, node_values: np.ndarray) -> np.ndarray:
solver = self._flow_solver
lift_up = self._get_x_d(node_values)
sol = solver.forward_solve_differential(lift_up)
return self._project_values(sol)
def _get_initial_guess(self, initial_guess: Optional[np.ndarray] = None
) -> np.ndarray:
shape = self._dynamical_system.variables.shape + (
self._n_shooting_nodes, )
if initial_guess is None:
initial_guess = np.zeros(shape, order='F')
elif initial_guess.shape != shape:
raise ValueError(
"initial_guess.shape is {} but should equal {}".format(
initial_guess.shape, shape))
return initial_guess
def _project_values(self, values: np.ndarray) -> np.ndarray:
    """Project full-dimension values onto the t2 basis at every node
    (contraction over the first axis; the node axis stays last)."""
    return np.einsum('ijr,i...r->j...r', self._t2s, values)
def _run(self,
         time_interval=None,
         initial_guess=None,
         dynamic_update=False,
         *args,
         **kwargs) -> TimeSolution:
    """Solve the BVP by a Newton iteration on the multiple-shooting system.

    Args:
        time_interval: optional output interval (defaults to the problem's).
        initial_guess: optional start iterate of shape
            variables.shape + (n_shooting_nodes,); zeros by default.
        dynamic_update: when True, return a node-level solution that
            densifies itself lazily instead of precomputing the full grid.

    Returns:
        TimeSolution holding the (possibly lazily evaluated) trajectory;
        also stored on self._bvp.variables.current_values.
    """
    logger.info('''MultipleShooting solver initialized with\n
    shooting_nodes: {}\n
    boundary_nodes: {}\n'''.format(self._shooting_nodes, self._bvp_nodes))
    self._init_solver(time_interval, *args, **kwargs)
    initial_guess = self._get_initial_guess(initial_guess)
    # The Newton iteration runs in the projected (rank-sized) coordinates.
    projected_values = self._project_values(initial_guess)
    for i in range(self.max_iter):
        residual = self._get_newton_rhs(projected_values)
        if self.abort(residual):
            break
        projected_values = self._newton_step(projected_values, residual)
    # Lift back to the full variables: x = x_d - D_a x_d - f_a.
    x_d = self._get_x_d(projected_values)
    full_node_values = x_d - np.einsum('ijr,j...r->i...r', self._das,
                                       x_d) - self._fas
    node_solution = TimeSolution(self._shooting_nodes, full_node_values)
    if dynamic_update:
        # Lazy mode: intermediate values are computed on demand.
        time_grid_solution = deepcopy(node_solution)
        time_grid_solution.dynamic_update = self._get_intermediate_values
    else:
        time_grid_solution = self._get_intermediate_values(
            node_solution, self._solution_time_grid)
    self._bvp.variables.current_values = time_grid_solution
    return time_grid_solution
def _get_intermediate_values(self, node_solution: np.ndarray,
                             time_grid: np.ndarray) -> TimeSolution:
    """Densify a node-level solution onto `time_grid`.

    Each shooting interval is re-integrated with the IVP solver, starting
    from the node state, and the result is scattered into the output grid.

    Args:
        node_solution: TimeSolution defined on the shooting nodes.
        time_grid: requested output grid (assumed sorted ascending).

    Returns:
        TimeSolution on `time_grid`.
    """
    # this is rather slow, but it's an inherent disadvantage of the shooting approach
    logger.debug(
        "Getting intermediate values on time grid of size: {}".format(
            time_grid.size))
    idsize = time_grid.size
    # idx[i]:idx[i+1] is the slice of time_grid covered by interval i.
    idx = np.append(
        np.searchsorted(time_grid, node_solution.time_grid, 'left'),
        idsize)
    solution = np.zeros(
        (*node_solution.solution.shape[:-1], time_grid.size))
    f_columns = self._bvp.boundary_values.n_inhom
    # BUG FIX: rel_tol was previously passed self.abs_tol.
    self._ivp_problem.init_solver(stepsize=self._stepsize,
                                  f_columns=f_columns,
                                  abs_tol=self.abs_tol,
                                  rel_tol=self.rel_tol)
    # TODO: Also compute backwards for better stability / difference to node points
    for i, node in enumerate(node_solution.time_grid):
        loweridx = idx[i]
        upperidx = idx[i + 1]
        grid = time_grid[loweridx:upperidx]
        t0 = node
        if grid.size == 0:
            continue
        x0 = node_solution(node)
        tf = grid[-1]
        interval = Time(t0, tf, grid)
        x0_times = self._ivp_problem.solve(interval, x0)
        # Skip the initial node itself unless it belongs to the output grid.
        if node in grid or x0_times.solution.shape[-1] == 1:
            idx_increment = 0
        else:
            idx_increment = 1
        logger.debug("x0_timesshape: {}".format(x0_times.solution.shape))
        solution[..., loweridx:upperidx] = np.atleast_2d(
            x0_times.solution.T).T[..., idx_increment:]
    return TimeSolution(time_grid, solution)
def _get_x_d(self, projected_values: np.ndarray) -> np.ndarray:
    """Lift projected (rank-sized) values back to the full differential
    variables via the t2 basis at every node."""
    return np.einsum('ijr,j...r->i...r', self._t2s, projected_values)
def _set_d_as(self) -> None:
das = np.zeros((self._nn, self._nn, self._n_shooting_nodes), order='F')
shape = self._dynamical_system.variables.shape
fas = np.zeros((*shape, self._n_shooting_nodes), order='F')
self._dynamical_system.init_rank()
for i, node in enumerate(self._shooting_nodes):
das[:, :, i] = self._dynamical_system.d_a(node)
fas[..., i] = self._dynamical_system.f_a(node)
self._das = das
self._fas = fas
# Register MultipleShooting as the default solver for multiple-point BVPs,
# and also for LQ optimal control problems (which are first converted to a
# BVP via LQOptimalControl.get_bvp).
solver_container_factory.register_solver(MultipleBoundaryValueProblem,
                                         MultipleShooting,
                                         default=True)
solver_container_factory.register_solver(
    LQOptimalControl,
    MultipleShooting,
    default=True,
    creator_function=LQOptimalControl.get_bvp)
import numpy as np
import cbh
def test_fragmentation_cbh1():
    """CBH-1 components of a bicyclic alkene test molecule."""
    smiles = "C1=C[C@H]2C[C@@H]1CC2"
    expected = sorted("CC C=C CC CC CC CC CC CC".split())
    assert sorted(cbh.get_components_scheme1(smiles)) == expected
def test_fragmentation_cbh2():
    """CBH-2 components of a bicyclic alkene test molecule."""
    smiles = "C1=C[C@H]2C[C@@H]1CC2"
    expected = sorted("C=CC C=CC CCC CCC CCC CC(C)C CC(C)C".split())
    assert sorted(cbh.get_components_scheme2(smiles)) == expected
def test_fragmentation_reaction_cbh1():
    """CBH-1 balancing of a cycloaddition test reaction."""
    reactants = ["C1=CC=CC1", "C=C"]
    products = ["C1=C[C@H]2C[C@@H]1CC2"]
    left, right = cbh.cbh_n(reactants, products, 1)
    assert sorted(left) == sorted(['C', 'C', 'C', 'C', 'C=C', 'C=C'])
    assert sorted(right) == sorted(['CC', 'CC', 'CC', 'CC'])
def test_fragmentation_reaction_cbh2():
    """CBH-2 balancing of a cycloaddition test reaction."""
    reactants = ["C1=CC=CC1", "C=C"]
    products = ["C1=C[C@H]2C[C@@H]1CC2"]
    left, right = cbh.cbh_n(reactants, products, 2)
    assert sorted(left) == sorted(['C=CC', 'C=CC', 'CC', 'CC', 'CC', 'CC'])
    assert sorted(right) == sorted(['CC(C)C', 'CC(C)C', 'CCC', 'CCC'])
def test_split_smiles():
    """split_smiles handles dot-separated strings, count prefixes and lists."""
    assert cbh.split_smiles("CC.CC") == ["CC", "CC"]
    assert cbh.split_smiles("2;CC", num_sep=";") == ["CC", "CC"]
    assert cbh.split_smiles(["CC", "CC.CC"]) == ["CC", "CC", "CC"]
def test_get_components_scheme1():
    """CBH-1 fragmentation of charged species (iminium, nitrilium,
    carboxylate, sulfonium)."""
    smiles = "C=[NH+]C"
    components = ["C=[NH2+]", "C[NH3+]"]
    # Consistency fix: pass the variable (the original inlined the literal).
    output = cbh.get_components_scheme1(smiles)
    assert sorted(components) == sorted(output)
    smiles = "C#[N+]C"
    components = ["C#[NH+]", "C[NH3+]"]
    output = cbh.get_components_scheme1(smiles)
    assert sorted(components) == sorted(output)
    smiles = "CC(=O)[O-]"
    components = ["CC", "C=O", "C[O-]"]
    output = cbh.get_components_scheme1(smiles)
    assert sorted(components) == sorted(output)
    smiles = "C[S+](C)C"
    components = ['C[SH2+]', 'C[SH2+]', 'C[SH2+]']
    output = cbh.get_components_scheme1(smiles)
    assert sorted(components) == sorted(output)
def test_get_components_scheme2():
    """CBH-2 fragmentation: an aromatic N-heterocycle and a small ring."""
    fun = cbh.get_components_scheme2
    # getting the right number of H on N
    hetero_smiles = "CCc1c[nH]c2ccccc12"
    hetero_components = ['CCC', 'C=CN', 'CNC', 'C=CC', 'C=CC', 'C=CC', 'C=CC', 'C=C(C)C', 'C=C(C)C', 'C=C(C)N']
    assert sorted(hetero_components) == sorted(fun(hetero_smiles))
    # connected smiles
    ring_smiles = "C1CO1"
    ring_components = ['CCO', 'CCO', 'COC']
    assert sorted(ring_components) == sorted(fun(ring_smiles))
if __name__ == "__main__":
    # These tests are meant to run under pytest, not as a script.
    # (Dataset-dump residue fused onto this line has been removed.)
    print("use python3 -m pytest test.py")
import cbh
def test_fragmentation_cbh1():
test_smiles = "C1=C[C@H]2C[C@@H]1CC2"
assert sorted(cbh.get_components_scheme1(test_smiles)) == sorted("CC C=C CC CC CC CC CC CC".split())
return
def test_fragmentation_cbh2():
test_smiles = "C1=C[C@H]2C[C@@H]1CC2"
assert sorted(cbh.get_components_scheme2(test_smiles)) == sorted("C=CC C=CC CCC CCC CCC CC(C)C CC(C)C".split())
return
def test_fragmentation_reaction_cbh1():
reactants, products = ["C1=CC=CC1", "C=C"], ["C1=C[C@H]2C[C@@H]1CC2"]
left, right = cbh.cbh_n(reactants, products, 1)
assert sorted(left) == sorted(['C', 'C', 'C', 'C', 'C=C', 'C=C'])
assert sorted(right) == sorted(['CC', 'CC', 'CC', 'CC'])
return
def test_fragmentation_reaction_cbh2():
reactants, products = ["C1=CC=CC1", "C=C"], ["C1=C[C@H]2C[C@@H]1CC2"]
left, right = cbh.cbh_n(reactants, products, 2)
assert sorted(left) == sorted(['C=CC', 'C=CC', 'CC', 'CC', 'CC', 'CC'])
assert sorted(right) == sorted(['CC(C)C', 'CC(C)C', 'CCC', 'CCC'])
return
def test_split_smiles():
assert cbh.split_smiles("CC.CC") == ["CC", "CC"]
assert cbh.split_smiles("2;CC", num_sep=";") == ["CC", "CC"]
assert cbh.split_smiles(["CC", "CC.CC"]) == ["CC", "CC", "CC"]
return
def test_get_components_scheme1():
smiles = "C=[NH+]C"
components = ["C=[NH2+]", "C[NH3+]"]
output = cbh.get_components_scheme1("C=[NH+]C")
assert sorted(components) == sorted(output)
smiles = "C#[N+]C"
components = ["C#[NH+]", "C[NH3+]"]
output = cbh.get_components_scheme1(smiles)
assert sorted(components) == sorted(output)
smiles = "CC(=O)[O-]"
components = ["CC", "C=O", "C[O-]"]
output = cbh.get_components_scheme1(smiles)
assert sorted(components) == sorted(output)
smiles = "C[S+](C)C"
components = ['C[SH2+]', 'C[SH2+]', 'C[SH2+]']
output = cbh.get_components_scheme1(smiles)
assert sorted(components) == sorted(output)
return
def test_get_components_scheme2():
fun = cbh.get_components_scheme2
# getting the right number of H on N
smiles = "CCc1c[nH]c2ccccc12"
components = ['CCC', 'C=CN', 'CNC', 'C=CC', 'C=CC', 'C=CC', 'C=CC', 'C=C(C)C', 'C=C(C)C', 'C=C(C)N']
output = fun(smiles)
assert sorted(components) == sorted(output)
# connected smiles
smiles = "C1CO1"
components = ['CCO', 'CCO', 'COC']
output = fun(smiles)
assert sorted(components) == sorted(output)
return
if __name__ == "__main__":
print("use python3 -m pytest test.py") | 0.760828 | 0.456834 |
import subprocess, json, re, csv, sys, functools
from jubatus.recommender import client
from jubatus.recommender import types
from jubatus.common import Datum
def partial_commit_json():
    """Return the repository's commit history as a list of dicts.

    Asks `git log` to emit one JSON-object-shaped record per commit
    (hash, author, date, sanitized message), strips the trailing comma
    and parses the result as a JSON array.

    NOTE(review): commit metadata containing double quotes or backslashes
    would produce invalid JSON here -- confirm this is acceptable for the
    target repositories.
    """
    dump =subprocess.check_output( ['git', 'log', '--pretty=format:{%n \"commit\": \"%H\",%n \"author\": \"%an <%ae>\",%n \"date\": \"%ad\",%n \"message\": \"%f\"%n},'] ).decode('UTF-8')
    return json.loads("[" + dump[:-1] + "]")
def update_files_per_revision():
    """Return, per commit, the list of file paths touched by that commit.

    `git log --name-only` emits blank-line-separated chunks; each chunk is
    reduced to its non-empty lines.
    """
    dump =subprocess.check_output( ['git', '--no-pager', 'log', '--name-only', '--format=\'%H', '--pretty=format:'] ).decode('UTF-8')
    return [[line for line in chunk.split('\n') if line]
            for chunk in dump.split('\n\n')]
def merge(j, f):
    """Combine one parsed commit dict `j` with its file list `f` into a
    single flat record."""
    record = {key: j[key] for key in ('author', 'message', 'commit', 'date')}
    record['files'] = f
    return record
def get_log_as_json():
    """Zip commit metadata with the per-commit file lists.

    NOTE(review): relies on both `git log` invocations emitting commits in
    the same order; map() also truncates to the shorter sequence -- confirm
    the two listings always align.
    """
    return list(map(merge, partial_commit_json(), update_files_per_revision()))
# Build the (file x commit) incidence structure from the git history.
log_as_json = get_log_as_json()
commits=[revision['commit'] for revision in log_as_json]
# Union of all file paths ever touched by any commit.
files=list(functools.reduce(lambda acc,e: acc.union(e['files']), log_as_json,set()))
log_csv={}
# initialize the matrix(file, commit). its elements are zero.
for f in files:
    log_csv.update([(f, [0 for x in range(len(commits))])])
# Set a 1 wherever a commit touched the file.
for commit_index in range(len(log_as_json)):
    commit = log_as_json[commit_index]
    for f in commit['files']:
        log_csv[f][commit_index]=1
# Feed each file's commit-incidence row into a Jubatus recommender running
# on localhost, keyed by filename.
recommender = client.Recommender("127.0.0.1", 9199, "my_recommender")
for filename,cmts in log_csv.items():
    d={}
    for i in range(len(cmts)):
        d.update([(commits[i], cmts[i])])
    recommender.update_row(filename, Datum(d))
# For every file, look up the most similar rows (files changed in the same
# commits); the first hit is the file itself, so keep only the rest.
relation_map={}
for f in files:
    r = [x.id for x in recommender.similar_row_from_id(f, 4)]
    # print(r[0] + ' -> ' + ', '.join(r[1:]))
    relation_map.update([(f, [x for x in r[1:]])])
print(relation_map)
# print(relation_map)
# [file1, file2, ll]
# Stable node ordering for the d3 force-layout JSON emitted below.
sorted_names=sorted(relation_map.keys())
def create_link(name):
    """Return d3 link dicts from `name` to each related file that sorts
    after it (the `target > source` filter deduplicates undirected pairs)."""
    source = sorted_names.index(name)
    targets = (sorted_names.index(other)
               for other in relation_map[name] if other in sorted_names)
    return [{'source': source, 'target': target, 'value': 1}
            for target in targets if target > source]
# Emit a d3 force-layout graph: one node per file, one link per related pair.
# (Dataset-dump residue fused onto the final line has been removed; it made
# the statement a syntax error.)
a = [create_link(n) for n in sorted_names]
with open('hoge.json', 'w') as f:
    json.dump({'nodes': [{'name': name, 'group': 1} for name in sorted_names],
               'links': [e for aa in a for e in aa]}, f, indent=2)
import subprocess, json, re, csv, sys, functools
from jubatus.recommender import client
from jubatus.recommender import types
from jubatus.common import Datum
def partial_commit_json():
dump =subprocess.check_output( ['git', 'log', '--pretty=format:{%n \"commit\": \"%H\",%n \"author\": \"%an <%ae>\",%n \"date\": \"%ad\",%n \"message\": \"%f\"%n},'] ).decode('UTF-8')
return json.loads("[" + dump[:-1] + "]")
def update_files_per_revision():
dump =subprocess.check_output( ['git', '--no-pager', 'log', '--name-only', '--format=\'%H', '--pretty=format:'] ).decode('UTF-8')
chunk=[]
for x in re.split('\n\n', dump):
chunk.append([xx for xx in re.split('\n', x) if len(xx)!=0])
return chunk
def merge(j, f):
return dict([('author', j['author']), ('message', j['message']), ('commit', j['commit']),('date', j['date']),('files', f)])
def get_log_as_json():
return list(map(merge, partial_commit_json(), update_files_per_revision()))
log_as_json = get_log_as_json()
commits=[revision['commit'] for revision in log_as_json]
files=list(functools.reduce(lambda acc,e: acc.union(e['files']), log_as_json,set()))
log_csv={}
# initialize the matrix(file, commit). its elements are zero.
for f in files:
log_csv.update([(f, [0 for x in range(len(commits))])])
for commit_index in range(len(log_as_json)):
commit = log_as_json[commit_index]
for f in commit['files']:
log_csv[f][commit_index]=1
recommender = client.Recommender("127.0.0.1", 9199, "my_recommender")
for filename,cmts in log_csv.items():
d={}
for i in range(len(cmts)):
d.update([(commits[i], cmts[i])])
recommender.update_row(filename, Datum(d))
relation_map={}
for f in files:
r = [x.id for x in recommender.similar_row_from_id(f, 4)]
# print(r[0] + ' -> ' + ', '.join(r[1:]))
relation_map.update([(f, [x for x in r[1:]])])
print(relation_map)
# print(relation_map)
# [file1, file2, ll]
sorted_names=sorted(relation_map.keys())
def create_link(name):
def get_index_by_name(name):
return sorted_names.index(name)
idx=get_index_by_name(name)
related_indice=[get_index_by_name(n) for n in relation_map[name] if (n in sorted_names)]
return [{'source': idx, 'target': i, 'value': 1} for i in related_indice if i > idx]
a = [create_link(n) for n in sorted_names]
with open('hoge.json', 'w') as f:
json.dump({'nodes': [{'name': name, 'group': 1} for name in sorted_names],
'links': [e for aa in a for e in aa]}, f,indent=2) | 0.174868 | 0.12692 |
from django.shortcuts import render
from helpers import download
import pandas as pd
import os
# Benchmark CSVs are cached under <cwd>/perf_data/ by the download helper.
cwd = os.getcwd()
perf_data_path = cwd + "/perf_data/"
# Release label shown on the dashboard.  NOTE(review): presumably set in
# the deployment environment -- confirm; None when the env var is unset.
current_release = [os.getenv('CUR_RELEASE')]
# Create your views here.
def monitoring_overview(request):
    """Render the static monitoring overview page."""
    return render(request, "monitoring_overview.html")
def cur_regression(request):
    """Render latency regression charts for the current release branch.

    Replaces ~45 copy-pasted assignments with one loop: for every
    telemetry/security mode and each of the p90/p99 quantiles, the time
    series of latency data points is built from the cached benchmark CSVs.
    The produced context keys are identical to the originals
    (e.g. 'latency_none_mtls_base_p90').
    """
    # (context-key stem, Labels suffix used in the benchmark CSVs)
    modes = [
        ('none_mtls_base', '_none_mtls_baseline'),
        ('none_mtls_both', '_none_mtls_both'),
        ('none_plaintext_both', '_none_plaintext_both'),
        ('v2_stats_nullvm_both', '_v2-stats-nullvm_both'),
        ('v2_stats_wasm_both', '_v2-stats-wasm_both'),
        ('v2_sd_nologging_nullvm_both', '_v2-sd-nologging-nullvm_both'),
        ('v2_sd_full_nullvm_both', '_v2-sd-full-nullvm_both'),
        ('none_security_authz_ip_both', '_none_security_authz_ip_both'),
        ('none_security_authz_path_both', '_none_security_authz_path_both'),
        ('none_security_authz_jwt_both', '_none_security_authz_jwt_both'),
        ('none_security_peer_authn_both', '_none_security_peer_authn_both'),
    ]
    cur_href_links, _, cur_release_dates, _, _, _ = download.download_benchmark_csv(60)
    context = {'current_release': current_release}
    for quantile in ('p90', 'p99'):
        for key_stem, label_suffix in modes:
            context['latency_{}_{}'.format(key_stem, quantile)] = \
                get_telemetry_mode_y_series(cur_href_links, cur_release_dates,
                                            label_suffix, quantile)
    return render(request, "cur_regression.html", context=context)
# Create your views here.
def master_regression(request):
    """Render latency regression charts for the master branch.

    Same DRY refactor as cur_regression: the copy-pasted assignments are
    replaced by a loop that produces identical context keys
    (e.g. 'latency_none_mtls_base_p90_master').
    """
    # (context-key stem, Labels suffix used in the benchmark CSVs)
    modes = [
        ('none_mtls_base', '_none_mtls_baseline'),
        ('none_mtls_both', '_none_mtls_both'),
        ('none_plaintext_both', '_none_plaintext_both'),
        ('v2_stats_nullvm_both', '_v2-stats-nullvm_both'),
        ('v2_stats_wasm_both', '_v2-stats-wasm_both'),
        ('v2_sd_nologging_nullvm_both', '_v2-sd-nologging-nullvm_both'),
        ('v2_sd_full_nullvm_both', '_v2-sd-full-nullvm_both'),
        ('none_security_authz_ip_both', '_none_security_authz_ip_both'),
        ('none_security_authz_path_both', '_none_security_authz_path_both'),
        ('none_security_authz_jwt_both', '_none_security_authz_jwt_both'),
        ('none_security_peer_authn_both', '_none_security_peer_authn_both'),
    ]
    _, _, _, master_href_links, _, master_release_dates = download.download_benchmark_csv(60)
    context = {}
    for quantile in ('p90', 'p99'):
        for key_stem, label_suffix in modes:
            context['latency_{}_{}_master'.format(key_stem, quantile)] = \
                get_telemetry_mode_y_series(master_href_links,
                                            master_release_dates,
                                            label_suffix, quantile)
    return render(request, "master_regression.html", context=context)
# Helpers
def get_latency_y_data_point(df, telemetry_mode, quantiles):
    """Extract one latency data point from a benchmark DataFrame.

    Filters rows at QPS 1000 with 16 threads whose Labels end with
    `telemetry_mode`, and returns the first value of the requested
    quantile column scaled by 1/1000, wrapped in a one-element list.
    Returns ['null'] when the column is absent or no row matches.
    """
    matched = df.query('ActualQPS == 1000 and NumThreads == 16 and Labels.str.endswith(@telemetry_mode)')
    quantile_data = matched.get(quantiles)
    if quantile_data is None or len(quantile_data) == 0:
        return ['null']
    return [matched[quantiles].head(1).values[0] / 1000]
def get_telemetry_mode_y_series(release_href_links, release_dates, telemetry_mode, quantiles):
    """Build one chart series: [year, month, day, latency-or-'null'] per release.

    For each release, the benchmark CSV cached under the test id embedded in
    the href link is read; any failure (missing file, malformed link, ...)
    yields the placeholder 'null' so the chart simply shows a gap.
    Fixes: removed dataset residue fused onto the return line (a syntax
    error) and the `[[]] * n` shared-list pre-allocation anti-pattern.
    """
    trending_data = []
    for i, href_link in enumerate(release_href_links):
        # Dates arrive as YYYYMMDD strings.
        release_date = release_dates[i]
        release_list = [release_date[0:4], release_date[4:6], release_date[6:]]
        try:
            benchmark_test_id = href_link.split("/")[4]
            df = pd.read_csv(perf_data_path + benchmark_test_id + "_benchmark.csv")
        except Exception as e:
            # Best-effort: log and chart a gap rather than failing the page.
            print(e)
            trending_data.append(release_list + ["null"])
        else:
            trending_data.append(release_list + [get_latency_y_data_point(df, telemetry_mode, quantiles)])
    return trending_data
from django.shortcuts import render
from helpers import download
import pandas as pd
import os
cwd = os.getcwd()
perf_data_path = cwd + "/perf_data/"
current_release = [os.getenv('CUR_RELEASE')]
# Create your views here.
def monitoring_overview(request):
return render(request, "monitoring_overview.html")
def cur_regression(request):
cur_href_links, _, cur_release_dates, _, _, _ = download.download_benchmark_csv(60)
latency_none_mtls_base_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_mtls_baseline', 'p90')
latency_none_mtls_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_mtls_both', 'p90')
latency_none_plaintext_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_plaintext_both', 'p90')
latency_v2_stats_nullvm_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-stats-nullvm_both', 'p90')
latency_v2_stats_wasm_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-stats-wasm_both', 'p90')
latency_v2_sd_nologging_nullvm_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-sd-nologging-nullvm_both', 'p90')
latency_v2_sd_full_nullvm_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-sd-full-nullvm_both', 'p90')
latency_none_security_authz_ip_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_security_authz_ip_both', 'p90')
latency_none_security_authz_path_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_security_authz_path_both', 'p90')
latency_none_security_authz_jwt_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_security_authz_jwt_both', 'p90')
latency_none_security_peer_authn_both_p90 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_security_peer_authn_both', 'p90')
latency_none_mtls_base_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_mtls_baseline', 'p99')
latency_none_mtls_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_mtls_both', 'p99')
latency_none_plaintext_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_plaintext_both', 'p99')
latency_v2_stats_nullvm_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-stats-nullvm_both', 'p99')
latency_v2_stats_wasm_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-stats-wasm_both', 'p99')
latency_v2_sd_nologging_nullvm_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-sd-nologging-nullvm_both', 'p99')
latency_v2_sd_full_nullvm_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_v2-sd-full-nullvm_both', 'p99')
latency_none_security_authz_ip_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_security_authz_ip_both', 'p99')
latency_none_security_authz_path_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_security_authz_path_both', 'p99')
latency_none_security_authz_jwt_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_security_authz_jwt_both', 'p99')
latency_none_security_peer_authn_both_p99 = get_telemetry_mode_y_series(cur_href_links, cur_release_dates, '_none_security_peer_authn_both', 'p99')
context = {'current_release': current_release,
'latency_none_mtls_base_p90': latency_none_mtls_base_p90,
'latency_none_mtls_both_p90': latency_none_mtls_both_p90,
'latency_none_plaintext_both_p90': latency_none_plaintext_both_p90,
'latency_v2_stats_nullvm_both_p90': latency_v2_stats_nullvm_both_p90,
'latency_v2_stats_wasm_both_p90': latency_v2_stats_wasm_both_p90,
'latency_v2_sd_nologging_nullvm_both_p90': latency_v2_sd_nologging_nullvm_both_p90,
'latency_v2_sd_full_nullvm_both_p90': latency_v2_sd_full_nullvm_both_p90,
'latency_none_security_authz_ip_both_p90': latency_none_security_authz_ip_both_p90,
'latency_none_security_authz_path_both_p90': latency_none_security_authz_path_both_p90,
'latency_none_security_authz_jwt_both_p90': latency_none_security_authz_jwt_both_p90,
'latency_none_security_peer_authn_both_p90': latency_none_security_peer_authn_both_p90,
'latency_none_mtls_base_p99': latency_none_mtls_base_p99,
'latency_none_mtls_both_p99': latency_none_mtls_both_p99,
'latency_none_plaintext_both_p99': latency_none_plaintext_both_p99,
'latency_v2_stats_nullvm_both_p99': latency_v2_stats_nullvm_both_p99,
'latency_v2_stats_wasm_both_p99': latency_v2_stats_wasm_both_p99,
'latency_v2_sd_nologging_nullvm_both_p99': latency_v2_sd_nologging_nullvm_both_p99,
'latency_v2_sd_full_nullvm_both_p99': latency_v2_sd_full_nullvm_both_p99,
'latency_none_security_authz_ip_both_p99': latency_none_security_authz_ip_both_p99,
'latency_none_security_authz_path_both_p99': latency_none_security_authz_path_both_p99,
'latency_none_security_authz_jwt_both_p99': latency_none_security_authz_jwt_both_p99,
'latency_none_security_peer_authn_both_p99': latency_none_security_peer_authn_both_p99,
}
return render(request, "cur_regression.html", context=context)
# Create your views here.
def master_regression(request):
_, _, _, master_href_links, _, master_release_dates = download.download_benchmark_csv(60)
latency_none_mtls_base_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_none_mtls_baseline', 'p90')
latency_none_mtls_both_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_none_mtls_both', 'p90')
latency_none_plaintext_both_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_none_plaintext_both', 'p90')
latency_v2_stats_nullvm_both_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-stats-nullvm_both', 'p90')
latency_v2_stats_wasm_both_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-stats-wasm_both', 'p90')
latency_v2_sd_nologging_nullvm_both_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-sd-nologging-nullvm_both', 'p90')
latency_v2_sd_full_nullvm_both_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-sd-full-nullvm_both', 'p90')
latency_none_security_authz_ip_both_p90_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_none_security_authz_ip_both', 'p90')
latency_none_security_authz_path_both_p90_master = get_telemetry_mode_y_series(
master_href_links, master_release_dates, '_none_security_authz_path_both', 'p90')
latency_none_security_authz_jwt_both_p90_master = get_telemetry_mode_y_series(
master_href_links, master_release_dates, '_none_security_authz_jwt_both', 'p90')
latency_none_security_peer_authn_both_p90_master = get_telemetry_mode_y_series(
master_href_links, master_release_dates, '_none_security_peer_authn_both', 'p90')
latency_none_mtls_base_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_none_mtls_baseline', 'p99')
latency_none_mtls_both_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_none_mtls_both', 'p99')
latency_none_plaintext_both_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_none_plaintext_both', 'p99')
latency_v2_stats_nullvm_both_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-stats-nullvm_both', 'p99')
latency_v2_stats_wasm_both_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-stats-wasm_both', 'p99')
latency_v2_sd_nologging_nullvm_both_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-sd-nologging-nullvm_both', 'p99')
latency_v2_sd_full_nullvm_both_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_v2-sd-full-nullvm_both', 'p99')
latency_none_security_authz_ip_both_p99_master = get_telemetry_mode_y_series(master_href_links, master_release_dates, '_none_security_authz_ip_both', 'p99')
latency_none_security_authz_path_both_p99_master = get_telemetry_mode_y_series(
master_href_links, master_release_dates, '_none_security_authz_path_both', 'p99')
latency_none_security_authz_jwt_both_p99_master = get_telemetry_mode_y_series(
master_href_links, master_release_dates, '_none_security_authz_jwt_both', 'p99')
latency_none_security_peer_authn_both_p99_master = get_telemetry_mode_y_series(
master_href_links, master_release_dates, '_none_security_peer_authn_both', 'p99')
context = {'latency_none_mtls_base_p90_master': latency_none_mtls_base_p90_master,
'latency_none_mtls_both_p90_master': latency_none_mtls_both_p90_master,
'latency_none_plaintext_both_p90_master': latency_none_plaintext_both_p90_master,
'latency_v2_stats_nullvm_both_p90_master': latency_v2_stats_nullvm_both_p90_master,
'latency_v2_stats_wasm_both_p90_master': latency_v2_stats_wasm_both_p90_master,
'latency_v2_sd_nologging_nullvm_both_p90_master': latency_v2_sd_nologging_nullvm_both_p90_master,
'latency_v2_sd_full_nullvm_both_p90_master': latency_v2_sd_full_nullvm_both_p90_master,
'latency_none_security_authz_ip_both_p90_master': latency_none_security_authz_ip_both_p90_master,
'latency_none_security_authz_path_both_p90_master': latency_none_security_authz_path_both_p90_master,
'latency_none_security_authz_jwt_both_p90_master': latency_none_security_authz_jwt_both_p90_master,
'latency_none_security_peer_authn_both_p90_master': latency_none_security_peer_authn_both_p90_master,
'latency_none_mtls_base_p99_master': latency_none_mtls_base_p99_master,
'latency_none_mtls_both_p99_master': latency_none_mtls_both_p99_master,
'latency_none_plaintext_both_p99_master': latency_none_plaintext_both_p99_master,
'latency_v2_stats_nullvm_both_p99_master': latency_v2_stats_nullvm_both_p99_master,
'latency_v2_stats_wasm_both_p99_master': latency_v2_stats_wasm_both_p99_master,
'latency_v2_sd_nologging_nullvm_both_p99_master': latency_v2_sd_nologging_nullvm_both_p99_master,
'latency_v2_sd_full_nullvm_both_p99_master': latency_v2_sd_full_nullvm_both_p99_master,
'latency_none_security_authz_ip_both_p99_master': latency_none_security_authz_ip_both_p99_master,
'latency_none_security_authz_path_both_p99_master': latency_none_security_authz_path_both_p99_master,
'latency_none_security_authz_jwt_both_p99_master': latency_none_security_authz_jwt_both_p99_master,
'latency_none_security_peer_authn_both_p99_master': latency_none_security_peer_authn_both_p99_master,
}
return render(request, "master_regression.html", context=context)
# Helpers
def get_latency_y_data_point(df, telemetry_mode, quantiles):
y_series_data = []
data = df.query('ActualQPS == 1000 and NumThreads == 16 and Labels.str.endswith(@telemetry_mode)')
quantile_data = data.get(quantiles)
if quantile_data is None or len(quantile_data) == 0:
y_series_data.append('null')
else:
y_series_data.append(data[quantiles].head(1).values[0] / 1000)
return y_series_data
def get_telemetry_mode_y_series(release_href_links, release_dates, telemetry_mode, quantiles):
trending_data = [[]] * len(release_href_links)
for i in range(len(release_href_links)):
release_year = release_dates[i][0:4]
release_month = release_dates[i][4:6]
release_date = release_dates[i][6:]
release_list = [release_year, release_month, release_date]
try:
href_parts = release_href_links[i].split("/")
benchmark_test_id = href_parts[4]
df = pd.read_csv(perf_data_path + benchmark_test_id + "_benchmark.csv")
except Exception as e:
print(e)
trending_data[i] = release_list + ["null"]
else:
trending_data[i] = release_list + [get_latency_y_data_point(df, telemetry_mode, quantiles)]
return trending_data | 0.353205 | 0.147647 |
import argparse
import logging
from opti_models.benchmarks.imagenet_torch_benchmark import main
logging.basicConfig(level=logging.INFO)
def parse_args():
# Default args
path = "/usr/local/opti_models/imagenetv2-top-images-format-val"
parser = argparse.ArgumentParser(description='Simple speed benchmark, based on pyTorch models')
parser.add_argument('--model-name', type=str, help="Name of the model to test", default='resnet18')
parser.add_argument(
'--path-to-images', default=path, type=str, help=f"Path to the validation images, default: {path}"
)
parser.add_argument('--size', default=(224, 224), nargs='+', type=int, help="Input shape, default=(224, 224)")
parser.add_argument('--batch-size', default=1, type=int, help="Size of the batch of images, default=1")
parser.add_argument('--workers', default=1, type=int, help="Number of workers, default=1")
return parser.parse_args()
def bench_all():
from opti_models.models.backbones.backbone_factory import show_available_backbones
included_names = [name for name in show_available_backbones()]
excluded_names = [
'efficientnet_b1b',
'efficientnet_b2b',
'efficientnet_b3b',
'efficientnet_b4b',
'efficientnet_b5b',
'efficientnet_b6b',
'efficientnet_b7b',
'efficientnet_b0c',
'efficientnet_b1c',
'efficientnet_b2c',
'efficientnet_b3c',
'efficientnet_b4c',
'efficientnet_b5c',
'efficientnet_b6c',
'efficientnet_b7c',
'efficientnet_b8c',
]
model_names = [name for name in included_names if name not in excluded_names]
for i, model_name in enumerate(model_names):
logging.info(f"\t{i + 1}/{len(model_names)}: {model_name.upper()}")
args = parse_args()
args.model_name = model_name
if model_name == "genet_large":
args.in_size = (256, 256)
elif model_name == 'inception_v3':
args.in_size = (299, 299)
try:
main(args=args)
except Exception as e:
logging.info(f"\tCan't bench {model_name} \n{repr(e)}")
logging.info(f"-" * 100)
if __name__ == '__main__':
bench_all() | tests/t_bench_torch.py | import argparse
import logging
from opti_models.benchmarks.imagenet_torch_benchmark import main
logging.basicConfig(level=logging.INFO)
def parse_args():
# Default args
path = "/usr/local/opti_models/imagenetv2-top-images-format-val"
parser = argparse.ArgumentParser(description='Simple speed benchmark, based on pyTorch models')
parser.add_argument('--model-name', type=str, help="Name of the model to test", default='resnet18')
parser.add_argument(
'--path-to-images', default=path, type=str, help=f"Path to the validation images, default: {path}"
)
parser.add_argument('--size', default=(224, 224), nargs='+', type=int, help="Input shape, default=(224, 224)")
parser.add_argument('--batch-size', default=1, type=int, help="Size of the batch of images, default=1")
parser.add_argument('--workers', default=1, type=int, help="Number of workers, default=1")
return parser.parse_args()
def bench_all():
from opti_models.models.backbones.backbone_factory import show_available_backbones
included_names = [name for name in show_available_backbones()]
excluded_names = [
'efficientnet_b1b',
'efficientnet_b2b',
'efficientnet_b3b',
'efficientnet_b4b',
'efficientnet_b5b',
'efficientnet_b6b',
'efficientnet_b7b',
'efficientnet_b0c',
'efficientnet_b1c',
'efficientnet_b2c',
'efficientnet_b3c',
'efficientnet_b4c',
'efficientnet_b5c',
'efficientnet_b6c',
'efficientnet_b7c',
'efficientnet_b8c',
]
model_names = [name for name in included_names if name not in excluded_names]
for i, model_name in enumerate(model_names):
logging.info(f"\t{i + 1}/{len(model_names)}: {model_name.upper()}")
args = parse_args()
args.model_name = model_name
if model_name == "genet_large":
args.in_size = (256, 256)
elif model_name == 'inception_v3':
args.in_size = (299, 299)
try:
main(args=args)
except Exception as e:
logging.info(f"\tCan't bench {model_name} \n{repr(e)}")
logging.info(f"-" * 100)
if __name__ == '__main__':
bench_all() | 0.433022 | 0.129375 |
import discord
import datetime
import time
import psutil
import sys
from discord.ext import commands
start_time = time.time()
class Utility(commands.Cog):
def __init__(self, bot):
self.bot = bot
# def restart_program():
# python = sys.executable
# os.execl(python, python, * sys.argv)
# Status Cycle
@commands.command()
@commands.is_owner()
async def online(self, ctx, *, cactivity = ""):
await self.bot.change_presence(status=discord.Status.online, activity=discord.Game(name=f'{cactivity}', type=3))
@commands.command()
@commands.is_owner()
async def idle(self, ctx, *, cactivity = ""):
await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(name=f'{cactivity}', type=3))
@commands.command()
@commands.is_owner()
async def dnd(self, ctx, *, cactivity = ""):
await self.bot.change_presence(status=discord.Status.dnd, activity=discord.Game(name=f'{cactivity}', type=3))
# Utility
@commands.command(pass_context=True)
async def status(self, ctx):
# Time
current_time = time.time()
difference = int(round(current_time - start_time))
utime = str(datetime.timedelta(seconds=difference))
embed = discord.Embed(
colour = discord.Colour.green()
)
# PSUtil - RAM Usage
dict(psutil.virtual_memory()._asdict())
usedmem = psutil.virtual_memory().used/1024/1024
# activemem = psutil.virtual_memory().active
tmem = psutil.virtual_memory().total/1024/1024
pmem = round((usedmem/tmem)*100)
# PSUtil - Swap Memory Usage
# dict(psutil.swap_memory()._asdict())
# uswap = psutil.swap_memory().used/1024/1024
# tswap = psutil.swap_memory().total/1024/1024
# pswap = round((uswap/tswap)*100)
#Bot Prefix Read
with open (f"./prefix.txt", "r") as botprefix:
prefix = botprefix.read()
# PSUtil Operating System
if psutil.LINUX:
os = 'Linux'
elif psutil.MACOS:
os = 'MacOS'
elif psutil.WINDOWS:
os = 'Windows'
else:
os = 'Unknown'
embed.set_author(name='System Monitor')
embed.add_field(name="CPU Usage", value=f'{psutil.cpu_percent()}%', inline=True)
embed.add_field(name="CPU Cores", value=psutil.cpu_count(), inline=True)
embed.add_field(name="RAM Usage", value=f'{round(usedmem)}/{round(tmem)}MB ({round(pmem)}%)', inline=True)
embed.add_field(name="Operating System", value=os, inline=True)
# embed.add_field(name="Swap Usage", value=f'{round(uswap)}/{round(tswap)}MB ({round(pmem)}%)', inline=True)
embed.add_field(name="Uptime", value=f'{utime}', inline=True)
embed.add_field(name='API Latency', value=f'{round(self.bot.latency * 1000)} ms', inline=True)
embed.add_field(name='Bot Prefix', value=f"`{prefix}`", inline=False)
embed.set_footer(text="Bot by SilentVOEZ")
await ctx.send(embed=embed)
botprefix.close()
# @commands.command()
# @commands.is_owner()
# async def reboot(self, ctx):
# await ctx.send('Rebooting...')
# restart_program()
@commands.command()
@commands.is_owner()
async def e(self, ctx):
print(f"---EMERGENCY SHUTDOWN REQUESTED BY {ctx.author}---")
await ctx.send("**--EMERGENCY SHUTDOWN--**")
await ctx.bot.logout()
def setup(bot):
bot.add_cog(Utility(bot)) | plugins/utility.py | import discord
import datetime
import time
import psutil
import sys
from discord.ext import commands
start_time = time.time()
class Utility(commands.Cog):
def __init__(self, bot):
self.bot = bot
# def restart_program():
# python = sys.executable
# os.execl(python, python, * sys.argv)
# Status Cycle
@commands.command()
@commands.is_owner()
async def online(self, ctx, *, cactivity = ""):
await self.bot.change_presence(status=discord.Status.online, activity=discord.Game(name=f'{cactivity}', type=3))
@commands.command()
@commands.is_owner()
async def idle(self, ctx, *, cactivity = ""):
await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(name=f'{cactivity}', type=3))
@commands.command()
@commands.is_owner()
async def dnd(self, ctx, *, cactivity = ""):
await self.bot.change_presence(status=discord.Status.dnd, activity=discord.Game(name=f'{cactivity}', type=3))
# Utility
@commands.command(pass_context=True)
async def status(self, ctx):
# Time
current_time = time.time()
difference = int(round(current_time - start_time))
utime = str(datetime.timedelta(seconds=difference))
embed = discord.Embed(
colour = discord.Colour.green()
)
# PSUtil - RAM Usage
dict(psutil.virtual_memory()._asdict())
usedmem = psutil.virtual_memory().used/1024/1024
# activemem = psutil.virtual_memory().active
tmem = psutil.virtual_memory().total/1024/1024
pmem = round((usedmem/tmem)*100)
# PSUtil - Swap Memory Usage
# dict(psutil.swap_memory()._asdict())
# uswap = psutil.swap_memory().used/1024/1024
# tswap = psutil.swap_memory().total/1024/1024
# pswap = round((uswap/tswap)*100)
#Bot Prefix Read
with open (f"./prefix.txt", "r") as botprefix:
prefix = botprefix.read()
# PSUtil Operating System
if psutil.LINUX:
os = 'Linux'
elif psutil.MACOS:
os = 'MacOS'
elif psutil.WINDOWS:
os = 'Windows'
else:
os = 'Unknown'
embed.set_author(name='System Monitor')
embed.add_field(name="CPU Usage", value=f'{psutil.cpu_percent()}%', inline=True)
embed.add_field(name="CPU Cores", value=psutil.cpu_count(), inline=True)
embed.add_field(name="RAM Usage", value=f'{round(usedmem)}/{round(tmem)}MB ({round(pmem)}%)', inline=True)
embed.add_field(name="Operating System", value=os, inline=True)
# embed.add_field(name="Swap Usage", value=f'{round(uswap)}/{round(tswap)}MB ({round(pmem)}%)', inline=True)
embed.add_field(name="Uptime", value=f'{utime}', inline=True)
embed.add_field(name='API Latency', value=f'{round(self.bot.latency * 1000)} ms', inline=True)
embed.add_field(name='Bot Prefix', value=f"`{prefix}`", inline=False)
embed.set_footer(text="Bot by SilentVOEZ")
await ctx.send(embed=embed)
botprefix.close()
# @commands.command()
# @commands.is_owner()
# async def reboot(self, ctx):
# await ctx.send('Rebooting...')
# restart_program()
@commands.command()
@commands.is_owner()
async def e(self, ctx):
print(f"---EMERGENCY SHUTDOWN REQUESTED BY {ctx.author}---")
await ctx.send("**--EMERGENCY SHUTDOWN--**")
await ctx.bot.logout()
def setup(bot):
bot.add_cog(Utility(bot)) | 0.184915 | 0.053675 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['PipelineArgs', 'Pipeline']
@pulumi.input_type
class PipelineArgs:
def __init__(__self__, *,
is_exposed: pulumi.Input[bool],
is_paused: pulumi.Input[bool],
pipeline_config: pulumi.Input[str],
pipeline_config_format: pulumi.Input[str],
pipeline_name: pulumi.Input[str],
team_name: pulumi.Input[str]):
"""
The set of arguments for constructing a Pipeline resource.
"""
pulumi.set(__self__, "is_exposed", is_exposed)
pulumi.set(__self__, "is_paused", is_paused)
pulumi.set(__self__, "pipeline_config", pipeline_config)
pulumi.set(__self__, "pipeline_config_format", pipeline_config_format)
pulumi.set(__self__, "pipeline_name", pipeline_name)
pulumi.set(__self__, "team_name", team_name)
@property
@pulumi.getter(name="isExposed")
def is_exposed(self) -> pulumi.Input[bool]:
return pulumi.get(self, "is_exposed")
@is_exposed.setter
def is_exposed(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_exposed", value)
@property
@pulumi.getter(name="isPaused")
def is_paused(self) -> pulumi.Input[bool]:
return pulumi.get(self, "is_paused")
@is_paused.setter
def is_paused(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_paused", value)
@property
@pulumi.getter(name="pipelineConfig")
def pipeline_config(self) -> pulumi.Input[str]:
return pulumi.get(self, "pipeline_config")
@pipeline_config.setter
def pipeline_config(self, value: pulumi.Input[str]):
pulumi.set(self, "pipeline_config", value)
@property
@pulumi.getter(name="pipelineConfigFormat")
def pipeline_config_format(self) -> pulumi.Input[str]:
return pulumi.get(self, "pipeline_config_format")
@pipeline_config_format.setter
def pipeline_config_format(self, value: pulumi.Input[str]):
pulumi.set(self, "pipeline_config_format", value)
@property
@pulumi.getter(name="pipelineName")
def pipeline_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "pipeline_name")
@pipeline_name.setter
def pipeline_name(self, value: pulumi.Input[str]):
pulumi.set(self, "pipeline_name", value)
@property
@pulumi.getter(name="teamName")
def team_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "team_name")
@team_name.setter
def team_name(self, value: pulumi.Input[str]):
pulumi.set(self, "team_name", value)
@pulumi.input_type
class _PipelineState:
def __init__(__self__, *,
is_exposed: Optional[pulumi.Input[bool]] = None,
is_paused: Optional[pulumi.Input[bool]] = None,
json: Optional[pulumi.Input[str]] = None,
pipeline_config: Optional[pulumi.Input[str]] = None,
pipeline_config_format: Optional[pulumi.Input[str]] = None,
pipeline_name: Optional[pulumi.Input[str]] = None,
team_name: Optional[pulumi.Input[str]] = None,
yaml: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Pipeline resources.
"""
if is_exposed is not None:
pulumi.set(__self__, "is_exposed", is_exposed)
if is_paused is not None:
pulumi.set(__self__, "is_paused", is_paused)
if json is not None:
pulumi.set(__self__, "json", json)
if pipeline_config is not None:
pulumi.set(__self__, "pipeline_config", pipeline_config)
if pipeline_config_format is not None:
pulumi.set(__self__, "pipeline_config_format", pipeline_config_format)
if pipeline_name is not None:
pulumi.set(__self__, "pipeline_name", pipeline_name)
if team_name is not None:
pulumi.set(__self__, "team_name", team_name)
if yaml is not None:
pulumi.set(__self__, "yaml", yaml)
@property
@pulumi.getter(name="isExposed")
def is_exposed(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "is_exposed")
@is_exposed.setter
def is_exposed(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_exposed", value)
@property
@pulumi.getter(name="isPaused")
def is_paused(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "is_paused")
@is_paused.setter
def is_paused(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_paused", value)
@property
@pulumi.getter
def json(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "json")
@json.setter
def json(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "json", value)
@property
@pulumi.getter(name="pipelineConfig")
def pipeline_config(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "pipeline_config")
@pipeline_config.setter
def pipeline_config(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pipeline_config", value)
@property
@pulumi.getter(name="pipelineConfigFormat")
def pipeline_config_format(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "pipeline_config_format")
@pipeline_config_format.setter
def pipeline_config_format(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pipeline_config_format", value)
@property
@pulumi.getter(name="pipelineName")
def pipeline_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "pipeline_name")
@pipeline_name.setter
def pipeline_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pipeline_name", value)
@property
@pulumi.getter(name="teamName")
def team_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "team_name")
@team_name.setter
def team_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "team_name", value)
@property
@pulumi.getter
def yaml(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "yaml")
@yaml.setter
def yaml(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "yaml", value)
class Pipeline(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
is_exposed: Optional[pulumi.Input[bool]] = None,
is_paused: Optional[pulumi.Input[bool]] = None,
pipeline_config: Optional[pulumi.Input[str]] = None,
pipeline_config_format: Optional[pulumi.Input[str]] = None,
pipeline_name: Optional[pulumi.Input[str]] = None,
team_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a Pipeline resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PipelineArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a Pipeline resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param PipelineArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PipelineArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
is_exposed: Optional[pulumi.Input[bool]] = None,
is_paused: Optional[pulumi.Input[bool]] = None,
pipeline_config: Optional[pulumi.Input[str]] = None,
pipeline_config_format: Optional[pulumi.Input[str]] = None,
pipeline_name: Optional[pulumi.Input[str]] = None,
team_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.plugin_download_url is None:
opts.plugin_download_url = _utilities.get_plugin_download_url()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PipelineArgs.__new__(PipelineArgs)
if is_exposed is None and not opts.urn:
raise TypeError("Missing required property 'is_exposed'")
__props__.__dict__["is_exposed"] = is_exposed
if is_paused is None and not opts.urn:
raise TypeError("Missing required property 'is_paused'")
__props__.__dict__["is_paused"] = is_paused
if pipeline_config is None and not opts.urn:
raise TypeError("Missing required property 'pipeline_config'")
__props__.__dict__["pipeline_config"] = pipeline_config
if pipeline_config_format is None and not opts.urn:
raise TypeError("Missing required property 'pipeline_config_format'")
__props__.__dict__["pipeline_config_format"] = pipeline_config_format
if pipeline_name is None and not opts.urn:
raise TypeError("Missing required property 'pipeline_name'")
__props__.__dict__["pipeline_name"] = pipeline_name
if team_name is None and not opts.urn:
raise TypeError("Missing required property 'team_name'")
__props__.__dict__["team_name"] = team_name
__props__.__dict__["json"] = None
__props__.__dict__["yaml"] = None
super(Pipeline, __self__).__init__(
'concourse:index/pipeline:Pipeline',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
is_exposed: Optional[pulumi.Input[bool]] = None,
is_paused: Optional[pulumi.Input[bool]] = None,
json: Optional[pulumi.Input[str]] = None,
pipeline_config: Optional[pulumi.Input[str]] = None,
pipeline_config_format: Optional[pulumi.Input[str]] = None,
pipeline_name: Optional[pulumi.Input[str]] = None,
team_name: Optional[pulumi.Input[str]] = None,
yaml: Optional[pulumi.Input[str]] = None) -> 'Pipeline':
"""
Get an existing Pipeline resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _PipelineState.__new__(_PipelineState)
__props__.__dict__["is_exposed"] = is_exposed
__props__.__dict__["is_paused"] = is_paused
__props__.__dict__["json"] = json
__props__.__dict__["pipeline_config"] = pipeline_config
__props__.__dict__["pipeline_config_format"] = pipeline_config_format
__props__.__dict__["pipeline_name"] = pipeline_name
__props__.__dict__["team_name"] = team_name
__props__.__dict__["yaml"] = yaml
return Pipeline(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="isExposed")
def is_exposed(self) -> pulumi.Output[bool]:
return pulumi.get(self, "is_exposed")
@property
@pulumi.getter(name="isPaused")
def is_paused(self) -> pulumi.Output[bool]:
return pulumi.get(self, "is_paused")
@property
@pulumi.getter
def json(self) -> pulumi.Output[str]:
return pulumi.get(self, "json")
@property
@pulumi.getter(name="pipelineConfig")
def pipeline_config(self) -> pulumi.Output[str]:
return pulumi.get(self, "pipeline_config")
@property
@pulumi.getter(name="pipelineConfigFormat")
def pipeline_config_format(self) -> pulumi.Output[str]:
return pulumi.get(self, "pipeline_config_format")
@property
@pulumi.getter(name="pipelineName")
def pipeline_name(self) -> pulumi.Output[str]:
return pulumi.get(self, "pipeline_name")
@property
@pulumi.getter(name="teamName")
def team_name(self) -> pulumi.Output[str]:
return pulumi.get(self, "team_name")
@property
@pulumi.getter
def yaml(self) -> pulumi.Output[str]:
return pulumi.get(self, "yaml") | sdk/python/pulumiverse_concourse/pipeline.py |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['PipelineArgs', 'Pipeline']
@pulumi.input_type
class PipelineArgs:
def __init__(__self__, *,
is_exposed: pulumi.Input[bool],
is_paused: pulumi.Input[bool],
pipeline_config: pulumi.Input[str],
pipeline_config_format: pulumi.Input[str],
pipeline_name: pulumi.Input[str],
team_name: pulumi.Input[str]):
"""
The set of arguments for constructing a Pipeline resource.
"""
pulumi.set(__self__, "is_exposed", is_exposed)
pulumi.set(__self__, "is_paused", is_paused)
pulumi.set(__self__, "pipeline_config", pipeline_config)
pulumi.set(__self__, "pipeline_config_format", pipeline_config_format)
pulumi.set(__self__, "pipeline_name", pipeline_name)
pulumi.set(__self__, "team_name", team_name)
@property
@pulumi.getter(name="isExposed")
def is_exposed(self) -> pulumi.Input[bool]:
return pulumi.get(self, "is_exposed")
@is_exposed.setter
def is_exposed(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_exposed", value)
@property
@pulumi.getter(name="isPaused")
def is_paused(self) -> pulumi.Input[bool]:
return pulumi.get(self, "is_paused")
@is_paused.setter
def is_paused(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_paused", value)
@property
@pulumi.getter(name="pipelineConfig")
def pipeline_config(self) -> pulumi.Input[str]:
return pulumi.get(self, "pipeline_config")
@pipeline_config.setter
def pipeline_config(self, value: pulumi.Input[str]):
pulumi.set(self, "pipeline_config", value)
@property
@pulumi.getter(name="pipelineConfigFormat")
def pipeline_config_format(self) -> pulumi.Input[str]:
return pulumi.get(self, "pipeline_config_format")
@pipeline_config_format.setter
def pipeline_config_format(self, value: pulumi.Input[str]):
pulumi.set(self, "pipeline_config_format", value)
@property
@pulumi.getter(name="pipelineName")
def pipeline_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "pipeline_name")
@pipeline_name.setter
def pipeline_name(self, value: pulumi.Input[str]):
pulumi.set(self, "pipeline_name", value)
@property
@pulumi.getter(name="teamName")
def team_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "team_name")
@team_name.setter
def team_name(self, value: pulumi.Input[str]):
pulumi.set(self, "team_name", value)
@pulumi.input_type
class _PipelineState:
    # Auto-generated Pulumi state type: carries every Pipeline field (inputs
    # plus provider-computed values such as json/yaml) for Pipeline.get().
    def __init__(__self__, *,
                 is_exposed: Optional[pulumi.Input[bool]] = None,
                 is_paused: Optional[pulumi.Input[bool]] = None,
                 json: Optional[pulumi.Input[str]] = None,
                 pipeline_config: Optional[pulumi.Input[str]] = None,
                 pipeline_config_format: Optional[pulumi.Input[str]] = None,
                 pipeline_name: Optional[pulumi.Input[str]] = None,
                 team_name: Optional[pulumi.Input[str]] = None,
                 yaml: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Pipeline resources.
        """
        # Only record fields that were actually supplied so that absent fields
        # stay unset rather than becoming explicit Nones in the state bag.
        if is_exposed is not None:
            pulumi.set(__self__, "is_exposed", is_exposed)
        if is_paused is not None:
            pulumi.set(__self__, "is_paused", is_paused)
        if json is not None:
            pulumi.set(__self__, "json", json)
        if pipeline_config is not None:
            pulumi.set(__self__, "pipeline_config", pipeline_config)
        if pipeline_config_format is not None:
            pulumi.set(__self__, "pipeline_config_format", pipeline_config_format)
        if pipeline_name is not None:
            pulumi.set(__self__, "pipeline_name", pipeline_name)
        if team_name is not None:
            pulumi.set(__self__, "team_name", team_name)
        if yaml is not None:
            pulumi.set(__self__, "yaml", yaml)

    @property
    @pulumi.getter(name="isExposed")
    def is_exposed(self) -> Optional[pulumi.Input[bool]]:
        """State field, wire name ``isExposed``."""
        return pulumi.get(self, "is_exposed")

    @is_exposed.setter
    def is_exposed(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_exposed", value)

    @property
    @pulumi.getter(name="isPaused")
    def is_paused(self) -> Optional[pulumi.Input[bool]]:
        """State field, wire name ``isPaused``."""
        return pulumi.get(self, "is_paused")

    @is_paused.setter
    def is_paused(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_paused", value)

    @property
    @pulumi.getter
    def json(self) -> Optional[pulumi.Input[str]]:
        """State field ``json`` (provider-computed on create; see Pipeline)."""
        return pulumi.get(self, "json")

    @json.setter
    def json(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "json", value)

    @property
    @pulumi.getter(name="pipelineConfig")
    def pipeline_config(self) -> Optional[pulumi.Input[str]]:
        """State field, wire name ``pipelineConfig``."""
        return pulumi.get(self, "pipeline_config")

    @pipeline_config.setter
    def pipeline_config(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pipeline_config", value)

    @property
    @pulumi.getter(name="pipelineConfigFormat")
    def pipeline_config_format(self) -> Optional[pulumi.Input[str]]:
        """State field, wire name ``pipelineConfigFormat``."""
        return pulumi.get(self, "pipeline_config_format")

    @pipeline_config_format.setter
    def pipeline_config_format(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pipeline_config_format", value)

    @property
    @pulumi.getter(name="pipelineName")
    def pipeline_name(self) -> Optional[pulumi.Input[str]]:
        """State field, wire name ``pipelineName``."""
        return pulumi.get(self, "pipeline_name")

    @pipeline_name.setter
    def pipeline_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pipeline_name", value)

    @property
    @pulumi.getter(name="teamName")
    def team_name(self) -> Optional[pulumi.Input[str]]:
        """State field, wire name ``teamName``."""
        return pulumi.get(self, "team_name")

    @team_name.setter
    def team_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "team_name", value)

    @property
    @pulumi.getter
    def yaml(self) -> Optional[pulumi.Input[str]]:
        """State field ``yaml`` (provider-computed on create; see Pipeline)."""
        return pulumi.get(self, "yaml")

    @yaml.setter
    def yaml(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "yaml", value)
class Pipeline(pulumi.CustomResource):
    # Auto-generated Pulumi resource for 'concourse:index/pipeline:Pipeline'.

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 is_exposed: Optional[pulumi.Input[bool]] = None,
                 is_paused: Optional[pulumi.Input[bool]] = None,
                 pipeline_config: Optional[pulumi.Input[str]] = None,
                 pipeline_config_format: Optional[pulumi.Input[str]] = None,
                 pipeline_name: Optional[pulumi.Input[str]] = None,
                 team_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Create a Pipeline resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: PipelineArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a Pipeline resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param PipelineArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a PipelineArgs
        # bundle or individual keyword properties.
        resource_args, opts = _utilities.get_resource_args_opts(PipelineArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       is_exposed: Optional[pulumi.Input[bool]] = None,
                       is_paused: Optional[pulumi.Input[bool]] = None,
                       pipeline_config: Optional[pulumi.Input[str]] = None,
                       pipeline_config_format: Optional[pulumi.Input[str]] = None,
                       pipeline_name: Optional[pulumi.Input[str]] = None,
                       team_name: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        """Shared constructor body: validate options/props and register the resource."""
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.plugin_download_url is None:
            opts.plugin_download_url = _utilities.get_plugin_download_url()
        if opts.id is None:
            # Creating a brand-new resource: all inputs are required unless we
            # are constructing from an existing URN.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = PipelineArgs.__new__(PipelineArgs)
            if is_exposed is None and not opts.urn:
                raise TypeError("Missing required property 'is_exposed'")
            __props__.__dict__["is_exposed"] = is_exposed
            if is_paused is None and not opts.urn:
                raise TypeError("Missing required property 'is_paused'")
            __props__.__dict__["is_paused"] = is_paused
            if pipeline_config is None and not opts.urn:
                raise TypeError("Missing required property 'pipeline_config'")
            __props__.__dict__["pipeline_config"] = pipeline_config
            if pipeline_config_format is None and not opts.urn:
                raise TypeError("Missing required property 'pipeline_config_format'")
            __props__.__dict__["pipeline_config_format"] = pipeline_config_format
            if pipeline_name is None and not opts.urn:
                raise TypeError("Missing required property 'pipeline_name'")
            __props__.__dict__["pipeline_name"] = pipeline_name
            if team_name is None and not opts.urn:
                raise TypeError("Missing required property 'team_name'")
            __props__.__dict__["team_name"] = team_name
            # Output-only fields: computed by the provider, never user-supplied.
            __props__.__dict__["json"] = None
            __props__.__dict__["yaml"] = None
        super(Pipeline, __self__).__init__(
            'concourse:index/pipeline:Pipeline',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            is_exposed: Optional[pulumi.Input[bool]] = None,
            is_paused: Optional[pulumi.Input[bool]] = None,
            json: Optional[pulumi.Input[str]] = None,
            pipeline_config: Optional[pulumi.Input[str]] = None,
            pipeline_config_format: Optional[pulumi.Input[str]] = None,
            pipeline_name: Optional[pulumi.Input[str]] = None,
            team_name: Optional[pulumi.Input[str]] = None,
            yaml: Optional[pulumi.Input[str]] = None) -> 'Pipeline':
        """
        Get an existing Pipeline resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # Construct a state bag directly; passing id via opts makes the engine
        # read the existing resource instead of creating one.
        __props__ = _PipelineState.__new__(_PipelineState)
        __props__.__dict__["is_exposed"] = is_exposed
        __props__.__dict__["is_paused"] = is_paused
        __props__.__dict__["json"] = json
        __props__.__dict__["pipeline_config"] = pipeline_config
        __props__.__dict__["pipeline_config_format"] = pipeline_config_format
        __props__.__dict__["pipeline_name"] = pipeline_name
        __props__.__dict__["team_name"] = team_name
        __props__.__dict__["yaml"] = yaml
        return Pipeline(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="isExposed")
    def is_exposed(self) -> pulumi.Output[bool]:
        """Output, wire name ``isExposed``."""
        return pulumi.get(self, "is_exposed")

    @property
    @pulumi.getter(name="isPaused")
    def is_paused(self) -> pulumi.Output[bool]:
        """Output, wire name ``isPaused``."""
        return pulumi.get(self, "is_paused")

    @property
    @pulumi.getter
    def json(self) -> pulumi.Output[str]:
        """Provider-computed output ``json`` (set to None at create time above)."""
        return pulumi.get(self, "json")

    @property
    @pulumi.getter(name="pipelineConfig")
    def pipeline_config(self) -> pulumi.Output[str]:
        """Output, wire name ``pipelineConfig``."""
        return pulumi.get(self, "pipeline_config")

    @property
    @pulumi.getter(name="pipelineConfigFormat")
    def pipeline_config_format(self) -> pulumi.Output[str]:
        """Output, wire name ``pipelineConfigFormat``."""
        return pulumi.get(self, "pipeline_config_format")

    @property
    @pulumi.getter(name="pipelineName")
    def pipeline_name(self) -> pulumi.Output[str]:
        """Output, wire name ``pipelineName``."""
        return pulumi.get(self, "pipeline_name")

    @property
    @pulumi.getter(name="teamName")
    def team_name(self) -> pulumi.Output[str]:
        """Output, wire name ``teamName``."""
        return pulumi.get(self, "team_name")

    @property
    @pulumi.getter
    def yaml(self) -> pulumi.Output[str]:
        """Provider-computed output ``yaml`` (set to None at create time above)."""
        return pulumi.get(self, "yaml")
import argparse
import asyncio
from datetime import datetime
import logging
from bncs import BnetClient, ChatEventType, ClientStatus, LocalHashingProvider, BncsProduct
class SampleBnetClient(BnetClient):
    """BnetClient subclass that echoes chat traffic to stdout."""

    async def _handle_chat_event(self, packet):
        # Let the base client decode the packet first, then render the
        # event types we care about via a template lookup.
        event = await super()._handle_chat_event(packet)

        templates = {
            ChatEventType.UserTalk: "<{u}> {t}",
            ChatEventType.UserEmote: "<{u} {t}>",
            ChatEventType.WhisperSent: "<To {u}> {t}",
            ChatEventType.UserWhisper: "<From {u}> {t}",
        }
        template = templates.get(event.eid)
        if template is not None:
            print(template.format(u=event.username, t=event.text))
async def main(args):
    """Connect to Battle.net with the configured account and run an interactive chat loop.

    :param args: parsed argparse namespace with username, password, product,
                 server, keys and hashes attributes (see the __main__ block).
    """
    config = {
        "username": args.username,
        "password": args.password,      # fix: was an unresolved <PASSWORD> placeholder
        "product": args.product,
        "server": args.server,
        "keys": args.keys.split(',') if args.keys else []
    }
    client = SampleBnetClient(**config)

    if args.hashes:
        platform = client.config["platform"]
        product = BncsProduct.get(args.product)
        our_key = (platform, product.code)
        # By only loading the hash files for our platform+product, we don't
        # waste time loading stuff we don't need.
        files = {
            our_key: product.hashes[platform]
        }
        client.hashing_provider = LocalHashingProvider(args.hashes, files)
        await client.hashing_provider.preload([our_key], True)
        await client.hashing_provider.connect()

    def get_user_input():
        # Blocking input(); executed in a thread-pool so the event loop
        # stays responsive while we wait for the user.
        return input()

    try:
        await client.full_connect_and_join()
        if hasattr(client.hashing_provider, 'connected'):
            # Hashing is only needed during login - release it now.
            client.hashing_provider.disconnect("done")

        if client.status == ClientStatus.Chatting:
            while client.connected:
                raw_input = await asyncio.get_event_loop().run_in_executor(None, get_user_input)
                if not raw_input:
                    # fix: empty line used to raise IndexError on raw_input[0]
                    continue

                is_local_cmd = True
                if raw_input[0] == '/':
                    # fix: renamed from 'args' - it shadowed main()'s parameter
                    cmd_args = raw_input[1:].split(' ')
                    cmd = cmd_args.pop(0).lower()

                    if cmd == "exit":
                        client.disconnect("Quit")
                        break
                    elif cmd == "channels":
                        product = client.state["product"].code if len(cmd_args) == 0 else cmd_args[0]
                        channels = await client.request_channel_list(product)
                        print(f"Channels available for '{product}':")
                        for i in range(0, len(channels), 3):
                            print(f"\t{', '.join(c for c in channels[i:i+3])}")
                    elif cmd == "filetime":
                        if len(cmd_args) == 0:
                            print("Command usage: /filetime <filename>")
                        else:
                            ft = await client.get_filetime(cmd_args[0])
                            print(f"Filetime for '{cmd_args[0]}': {ft}")
                    elif cmd == "profile":
                        user = None if len(cmd_args) == 0 else cmd_args[0]
                        data = await client.request_profile(user)
                        if data:
                            user = user or client.state["account_name"]
                            print(f"Profile for '{user}': {data}")
                        else:
                            print("Profile request returned no data")
                    elif cmd == "accountinfo":
                        data = await client.request_account_keys()
                        if data:
                            print(f"Account info: {data}")
                        else:
                            print("No account info returned")
                    elif cmd == "ad":
                        # fix: guard the split - 'platform\product' without a
                        # backslash used to raise ValueError on unpacking
                        if len(cmd_args) == 1 and '\\' in cmd_args[0]:
                            platform, product = cmd_args[0].split('\\', 1)
                        else:
                            platform, product = None, None
                        banner = await client.check_ad(platform=platform, product=product)
                        print(f"Current ad banner: {banner}")
                    elif cmd == "news":
                        news, motd = await client.get_news_info()
                        print(f"Server MotD: {motd}")
                        if news:
                            latest = news[-1]
                            print(f"Latest news: {datetime.utcfromtimestamp(latest[0]).isoformat()} -> " +
                                  str(latest[1].split('\n')))
                        else:
                            # fix: used to crash on latest[0] when no news entries
                            print("No news entries available")
                    else:
                        is_local_cmd = False

                if not is_local_cmd:
                    # Anything we don't handle locally goes to the server.
                    await client.send_command(raw_input)
    finally:
        # Best-effort teardown of both connections, even on error/Ctrl-C.
        if client and client.connected:
            client.disconnect("dead")
            await client.wait_closed()
        if client.hashing_provider and hasattr(client.hashing_provider, 'connected') \
                and client.hashing_provider.connected:
            client.hashing_provider.disconnect("dead")
            await client.hashing_provider.wait_closed()
if __name__ == "__main__":
    # Verbose logging so protocol traffic from the bncs package is visible.
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument("username", help="Name of the account to login as")
    parser.add_argument("password", help="Password for the account")
    parser.add_argument("--server", help="Hostname or IP of the Battle.net server", default='useast.battle.net')
    parser.add_argument("--product", help="4-digit code of the game to emulate (ex: WAR3, SEXP)", default='DRTL')
    parser.add_argument("--keys", help="Comma separated list of CD keys for the emulated product")
    parser.add_argument("--hashes", help="Path to directory containing game hash files")
    aa = parser.parse_args()

    try:
        asyncio.run(main(aa))
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the client; exit quietly.
        # (Repaired: this final line had been fused with dataset separator text.)
        pass
import asyncio
from datetime import datetime
import logging
from bncs import BnetClient, ChatEventType, ClientStatus, LocalHashingProvider, BncsProduct
class SampleBnetClient(BnetClient):
async def _handle_chat_event(self, packet):
event = await super()._handle_chat_event(packet)
if event.eid == ChatEventType.UserTalk:
print(f"<{event.username}> {event.text}")
elif event.eid == ChatEventType.UserEmote:
print(f"<{event.username} {event.text}>")
elif event.eid == ChatEventType.WhisperSent:
print(f"<To {event.username}> {event.text}")
elif event.eid == ChatEventType.UserWhisper:
print(f"<From {event.username}> {event.text}")
async def main(args):
config = {
"username": args.username,
"password": <PASSWORD>,
"product": args.product,
"server": args.server,
"keys": args.keys.split(',') if args.keys else []
}
client = SampleBnetClient(**config)
if args.hashes:
platform = client.config["platform"]
product = BncsProduct.get(args.product)
our_key = (platform, product.code)
files = {
our_key: product.hashes[platform]
}
# By only loading the hash files for our platform+product, we don't waste time loading stuff we don't need.
client.hashing_provider = LocalHashingProvider(args.hashes, files)
await client.hashing_provider.preload([our_key], True)
await client.hashing_provider.connect()
def get_user_input():
return input()
try:
await client.full_connect_and_join()
if hasattr(client.hashing_provider, 'connected'):
client.hashing_provider.disconnect("done")
if client.status == ClientStatus.Chatting:
while client.connected:
raw_input = await asyncio.get_event_loop().run_in_executor(None, get_user_input)
is_local_cmd = True
if raw_input[0] == '/':
args = raw_input[1:].split(' ')
cmd = args.pop(0).lower()
if cmd == "exit":
client.disconnect("Quit")
break
elif cmd == "channels":
product = client.state["product"].code if len(args) == 0 else args[0]
channels = await client.request_channel_list(product)
print(f"Channels available for '{product}':")
for i in range(0, len(channels), 3):
print(f"\t{', '.join(c for c in channels[i:i+3])}")
elif cmd == "filetime":
if len(args) == 0:
print("Command usage: /filetime <filename>")
else:
ft = await client.get_filetime(args[0])
print(f"Filetime for '{args[0]}': {ft}")
elif cmd == "profile":
user = None if len(args) == 0 else args[0]
data = await client.request_profile(user)
if data:
user = user or client.state["account_name"]
print(f"Profile for '{user}': {data}")
else:
print("Profile request returned no data")
elif cmd == "accountinfo":
data = await client.request_account_keys()
if data:
print(f"Account info: {data}")
else:
print("No account info returned")
elif cmd == "ad":
platform, product = args[0].split('\\') if len(args) == 1 else (None, None)
banner = await client.check_ad(platform=platform, product=product)
print(f"Current ad banner: {banner}")
elif cmd == "news":
news, motd = await client.get_news_info()
latest = news[-1] if len(news) > 0 else None
print(f"Server MotD: {motd}")
print(f"Latest news: {datetime.utcfromtimestamp(latest[0]).isoformat()} -> " +
str(latest[1].split('\n')))
else:
is_local_cmd = False
if not is_local_cmd:
await client.send_command(raw_input)
finally:
if client and client.connected:
client.disconnect("dead")
await client.wait_closed()
if client.hashing_provider and hasattr(client.hashing_provider, 'connected') \
and client.hashing_provider.connected:
client.hashing_provider.disconnect("dead")
await client.hashing_provider.wait_closed()
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument("username", help="Name of the account to login as")
parser.add_argument("password", help="Password for the account")
parser.add_argument("--server", help="Hostname or IP of the Battle.net server", default='useast.battle.net')
parser.add_argument("--product", help="4-digit code of the game to emulate (ex: WAR3, SEXP)", default='DRTL')
parser.add_argument("--keys", help="Comma separated list of CD keys for the emulated product")
parser.add_argument("--hashes", help="Path to directory containing game hash files")
aa = parser.parse_args()
try:
asyncio.run(main(aa))
except KeyboardInterrupt:
pass | 0.273671 | 0.158597 |
import sys, threading, time, random, socket
def server():
    """RS (root) DNS server: answer hostname queries from PROJI-DNSRS.txt over TCP.

    The listening port is taken from sys.argv[1]. Unknown hostnames are
    referred to the TS server via the table's "NS" row.
    """
    # Establish port via command-line argument
    port = int(sys.argv[1])

    # Load the RS DNS table; "with" guarantees the file handle is closed
    # (the old code leaked it).
    DNSTable = {}
    TSHostname = None   # set from the "NS" delegation row, if present
    with open("PROJI-DNSRS.txt", "r") as RSFile:
        for line in RSFile:
            hostname, IPAddress, flag = line.split()
            hostname = hostname.lower()
            DNSTable[hostname] = hostname + " " + IPAddress + " " + flag
            if flag == "NS":
                TSHostname = hostname + " - " + flag
    print("Creating DNS dictionary: " + str(DNSTable) + "\n")

    try:
        serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        print("RS server socket created: port " + str(port) + "\n")
    except socket.error as socketError:
        print('RS socket already open, error: {}\n'.format(socketError))
        exit()

    serverBinding = ('', port)
    serverSocket.bind(serverBinding)
    serverSocket.listen(1)

    RSHostname = socket.gethostname()
    print("RS server hostname: {}".format(RSHostname))
    localhostIP = (socket.gethostbyname(RSHostname))
    print("RS server IP address: {}".format(localhostIP))

    while True:
        clientSocketID, address = serverSocket.accept()
        print("Received client connection request from: {}".format(address))

        # Server greeting message to client
        greeting = "Welcome to CS 352 RS server! Socket to me!"
        clientSocketID.send(greeting.encode('utf-8'))

        # Fix: recv() returns bytes in Python 3 - decode it, otherwise the
        # comparisons against the str "shutdownRSServer" and the str-keyed
        # DNSTable could never match.
        queryFromClient = clientSocketID.recv(256).decode('utf-8')

        # The client is done querying
        if queryFromClient == "shutdownRSServer":
            print("Received shutdown command...")
            clientSocketID.close()
            break
        # If hostname is in dictionary, send hostname information
        elif queryFromClient in DNSTable:
            clientSocketID.send(str(DNSTable[queryFromClient]).encode('utf-8'))
        # Hostname not in dictionary, refer client to the TS server
        elif TSHostname is not None:
            clientSocketID.send(TSHostname.encode('utf-8'))
        else:
            # Fix: TSHostname used to be unbound (NameError) when the table
            # had no "NS" row; reply with an empty payload instead.
            clientSocketID.send("".encode('utf-8'))

        # Close the client socket connection
        print("\nClosing socket connection.\n")
        clientSocketID.close()

    # Close server socket and shutdown server
    serverSocket.close()
    exit()
if __name__ == "__main__":
    # Run the server on a named thread, then sleep a random 0-5s on the main
    # thread (course exercise: stagger output when several servers start).
    thread = threading.Thread(name='server', target=server)
    thread.start()

    sleepTime = random.random() * 5
    print("\nRS server thread executed, sleep time: " + str(sleepTime) + " sec\n")
    # Repaired: this final line had been fused with dataset separator text.
    time.sleep(sleepTime)
import sys, threading, time, random, socket
def server():
# Establish port via command-line argument
port = int(sys.argv[1])
# Create file object to read RS DNS table
RSFile = open("PROJI-DNSRS.txt", "r")
# Initialize dictionary for DNS table
DNSTable = {}
# Store RS DNS table in dictionary
for line in RSFile:
hostname, IPAddress, flag = line.split()
hostname = hostname.lower()
DNSTable[hostname] = hostname + " " + IPAddress + " " + flag
if flag == "NS":
TSHostname = hostname + " - " + flag
print("Creating DNS dictionary: " + str(DNSTable) + "\n")
try:
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("RS server socket created: port " + str(port) + "\n")
except socket.error as socketError:
print('RS socket already open, error: {}\n'.format(socketError))
exit()
serverBinding = ('', port)
serverSocket.bind(serverBinding)
serverSocket.listen(1)
RSHostname = socket.gethostname()
print("RS server hostname: {}".format(RSHostname))
localhostIP = (socket.gethostbyname(RSHostname))
print("RS server IP address: {}".format(localhostIP))
while True:
clientSocketID, address = serverSocket.accept()
print("Received client connection request from: {}".format(address))
# Server greeting message to client
greeting = "Welcome to CS 352 RS server! Socket to me!"
clientSocketID.send(greeting.encode('utf-8'))
# Receive hostname query from the client
queryFromClient = clientSocketID.recv(256)
# The client is done querying
if queryFromClient == "shutdownRSServer":
print("Received shutdown command...")
clientSocketID.close()
break
# If hostname is in dictionary, send hostname information
elif queryFromClient in DNSTable:
clientSocketID.send(str(DNSTable[queryFromClient]).encode('utf-8'))
# Hostname not in dictionary, send TS server information
else:
clientSocketID.send(TSHostname.encode('utf-8'))
# Close the client socket connection
print("\nClosing socket connection.\n")
clientSocketID.close()
# Close server socket and shutdown server
serverSocket.close()
exit()
if __name__ == "__main__":
thread = threading.Thread(name='server', target = server)
thread.start()
sleepTime = random.random() * 5
print("\nRS server thread executed, sleep time: " + str(sleepTime) + " sec\n")
time.sleep(sleepTime) | 0.228156 | 0.063657 |
import os
import pytest
from click.testing import CliRunner
from cogctl.cli.main import cli
from functools import partial
@pytest.fixture
def cogctl():
    """Set up a test runner from the true root of the application, instead
    of testing individual commands directly.
    """
    runner = CliRunner()
    with runner.isolated_filesystem():
        # Tests only pass argv; catch_exceptions=False surfaces real tracebacks
        # instead of Click swallowing them into the result object.
        yield partial(runner.invoke,
                      cli,
                      catch_exceptions=False)
@pytest.fixture
def config_file(cogctl):
    """Write a cogctl config file into the isolated filesystem and return its
    absolute path. Covers both legacy host/port profiles and a new url-style
    profile.

    NOTE(review): "<PASSWORD>" looks like a dataset anonymization placeholder,
    not a real fixture value - confirm against the upstream repository.
    """
    with open("config", "w") as f:
        f.write("""\
[defaults]
profile = default_profile
[default_profile]
host = default_host
password = <PASSWORD>
port = 4000
secure = false
user = default_user
[testing]
host = cog.testing.com
password = <PASSWORD>
port = 1234
secure = true
user = tester
[new-style]
url = https://cog.newstyle.com:1234
user = new_user
password = <PASSWORD>
""")
    return "{}/{}".format(os.getcwd(), "config")
def test_list_profiles(cogctl, config_file):
    """Bare `profile` lists every profile; the default comes first and legacy
    host/port entries are rendered as a URL."""
    result = cogctl(["--config-file", config_file,
                     "profile"])

    assert result.exit_code == 0
    assert result.output == """\
Profile: default_profile (default)
User: default_user
URL: http://default_host:4000
Profile: new-style
User: new_user
URL: https://cog.newstyle.com:1234
Profile: testing
User: tester
URL: https://cog.testing.com:1234
"""
def test_add_new_profile(cogctl, config_file):
    """`profile create` appends a new url-style section to the config file and
    leaves every existing section untouched."""
    result = cogctl(["--config-file", config_file,
                     "profile", "create",
                     "my_new_profile",
                     "https://myserver.com:1234",
                     "my_user", "my_password"])

    assert result.exit_code == 0
    assert result.output == ""

    with open(config_file) as f:
        contents = f.read()

    assert contents == """\
[defaults]
profile = default_profile
[default_profile]
host = default_host
password = <PASSWORD>
port = 4000
secure = false
user = default_user
[testing]
host = cog.testing.com
password = <PASSWORD>
port = 1234
secure = true
user = tester
[new-style]
url = https://cog.newstyle.com:1234
user = new_user
password = <PASSWORD>
[my_new_profile]
password = <PASSWORD>
url = https://myserver.com:1234
user = my_user
"""
def test_change_default_profile(cogctl, config_file):
    """`profile default NAME` rewrites only [defaults].profile; the profile
    sections themselves are untouched."""
    result = cogctl(["--config-file", config_file,
                     "profile", "default",
                     "testing"])

    assert result.exit_code == 0
    assert result.output == ""

    with open(config_file) as f:
        contents = f.read()

    assert contents == """\
[defaults]
profile = testing
[default_profile]
host = default_host
password = <PASSWORD>
port = 4000
secure = false
user = default_user
[testing]
host = cog.testing.com
password = <PASSWORD>
port = 1234
secure = true
user = tester
[new-style]
url = https://cog.newstyle.com:1234
user = new_user
password = <PASSWORD>
"""
def test_change_default_invalid_profile(cogctl, config_file):
    """Setting an unknown profile as default fails with a usage error (exit 2)
    and leaves the config file byte-for-byte unchanged.

    Repaired: the closing quotes of the final expected-contents string had
    been fused with dataset separator text.
    """
    result = cogctl(["--config-file", config_file,
                     "profile", "default",
                     "missing"])

    assert result.exit_code == 2
    assert result.output == """\
Usage: cli profile default [OPTIONS] NAME
Error: Invalid value for \"name\": \"missing\" was not found
"""

    with open(config_file) as f:
        contents = f.read()

    assert contents == """\
[defaults]
profile = default_profile
[default_profile]
host = default_host
password = <PASSWORD>
port = 4000
secure = false
user = default_user
[testing]
host = cog.testing.com
password = <PASSWORD>
port = 1234
secure = true
user = tester
[new-style]
url = https://cog.newstyle.com:1234
user = new_user
password = <PASSWORD>
"""
import pytest
from click.testing import CliRunner
from cogctl.cli.main import cli
from functools import partial
@pytest.fixture
def cogctl():
"""Set up a test runner from the true root of the application, instead
of testing individual commands directly.
"""
runner = CliRunner()
with runner.isolated_filesystem():
yield partial(runner.invoke,
cli,
catch_exceptions=False)
@pytest.fixture
def config_file(cogctl):
with open("config", "w") as f:
f.write("""\
[defaults]
profile = default_profile
[default_profile]
host = default_host
password = <PASSWORD>
port = 4000
secure = false
user = default_user
[testing]
host = cog.testing.com
password = <PASSWORD>
port = 1234
secure = true
user = tester
[new-style]
url = https://cog.newstyle.com:1234
user = new_user
password = <PASSWORD>
""")
return "{}/{}".format(os.getcwd(), "config")
def test_list_profiles(cogctl, config_file):
result = cogctl(["--config-file", config_file,
"profile"])
assert result.exit_code == 0
assert result.output == """\
Profile: default_profile (default)
User: default_user
URL: http://default_host:4000
Profile: new-style
User: new_user
URL: https://cog.newstyle.com:1234
Profile: testing
User: tester
URL: https://cog.testing.com:1234
"""
def test_add_new_profile(cogctl, config_file):
result = cogctl(["--config-file", config_file,
"profile", "create",
"my_new_profile",
"https://myserver.com:1234",
"my_user", "my_password"])
assert result.exit_code == 0
assert result.output == ""
with open(config_file) as f:
contents = f.read()
assert contents == """\
[defaults]
profile = default_profile
[default_profile]
host = default_host
password = <PASSWORD>
port = 4000
secure = false
user = default_user
[testing]
host = cog.testing.com
password = <PASSWORD>
port = 1234
secure = true
user = tester
[new-style]
url = https://cog.newstyle.com:1234
user = new_user
password = <PASSWORD>
[my_new_profile]
password = <PASSWORD>
url = https://myserver.com:1234
user = my_user
"""
def test_change_default_profile(cogctl, config_file):
result = cogctl(["--config-file", config_file,
"profile", "default",
"testing"])
assert result.exit_code == 0
assert result.output == ""
with open(config_file) as f:
contents = f.read()
assert contents == """\
[defaults]
profile = testing
[default_profile]
host = default_host
password = <PASSWORD>
port = 4000
secure = false
user = default_user
[testing]
host = cog.testing.com
password = <PASSWORD>
port = 1234
secure = true
user = tester
[new-style]
url = https://cog.newstyle.com:1234
user = new_user
password = <PASSWORD>
"""
def test_change_default_invalid_profile(cogctl, config_file):
result = cogctl(["--config-file", config_file,
"profile", "default",
"missing"])
assert result.exit_code == 2
assert result.output == """\
Usage: cli profile default [OPTIONS] NAME
Error: Invalid value for \"name\": \"missing\" was not found
"""
with open(config_file) as f:
contents = f.read()
assert contents == """\
[defaults]
profile = default_profile
[default_profile]
host = default_host
password = <PASSWORD>
port = 4000
secure = false
user = default_user
[testing]
host = cog.testing.com
password = <PASSWORD>
port = 1234
secure = true
user = tester
[new-style]
url = https://cog.newstyle.com:1234
user = new_user
password = <PASSWORD>
""" | 0.441191 | 0.297052 |
from __future__ import annotations
from bfassist.sql import *
from bfassist.master.league import LeaguePlayers, LeaguePlayer, LineUp
# noinspection PyUnusedLocal
def __preload__(forClient: bool = True):
    """Package load hook; nothing to do before loading this module."""
    pass
def __postload__(forClient: bool = True):
pass
def pyPlayerToSQL(inPlayer: LeaguePlayer):
    """Serialise a LeaguePlayer to its SQL column value: the player's keyhash,
    or the empty string when no player is given."""
    return "" if inPlayer is None else inPlayer.getKeyhash()
def pyLineUpToSQL(inLineUp: LineUp):
    """Serialise a LineUp to one SQL string: keyhashes joined with ';', any
    ';' inside a keyhash escaped as '\\&r01', falsy entries stored as ''."""
    parts = []
    for keyhash in inLineUp.lineUp:
        parts.append(keyhash.replace(';', '\\&r01') if keyhash else '')
    return ";".join(parts)
def sqlPlayerToPy(sql: str):
    """Resolve a stored keyhash to its LeaguePlayer, falling back to the raw
    string for unknown keyhashes."""
    return LeaguePlayers[sql] if sql in LeaguePlayers else str(sql)
def sqlLineUpToPy(sql: str):
    """Deserialise a SQL lineup string back into a LineUp; unescapes
    '\\&r01' to ';', de-duplicates, and keeps only known players."""
    lineUp = LineUp()
    keyhashes = {chunk.replace('\\&r01', ';') for chunk in sql.split(';')}
    for keyhash in keyhashes:
        if keyhash in LeaguePlayers:
            lineUp[keyhash] = LeaguePlayers[keyhash]
    return lineUp
class LeagueTeam(DBStorable, table="teams", live=False):
    """ Class that represents a team participating in a league season.

        :param TeamName:        The name of this team.
        :param TeamLeader:      The team leader of this team.
        :param TeamPlayers:     The league livePlayers of this team as line-up.
        :param ActiveSeasons:   Names of the seasons this team was active.

        note::  Author(s): Mitch """

    def __init__(self, TeamName: str, ActiveSeasons: set, TeamLeader: LeaguePlayer = None, TeamPlayers: LineUp = None):
        # Register the (to-SQL, from-SQL) converter pairs once per class -
        # presumably DBStorable uses them for the player/line-up columns;
        # confirm in bfassist.sql.
        if not self.initialised:
            self.addConversions(
                (pyPlayerToSQL, sqlPlayerToPy),
                (pyLineUpToSQL, sqlLineUpToPy)
            )
        # 'S'-prefixed attributes are DBStorable column declarations: the
        # assigned tuple is (value, SQL type[, constraint]).
        self.STeamName = TeamName, VARCHAR(32), PRIMARY_KEY
        self.SActiveSeasons = ActiveSeasons, TINYTEXT
        self.STeamLeader = TeamLeader, VARCHAR(32)
        if TeamPlayers:
            self.STeamPlayers = TeamPlayers, TEXT
        else:
            # Default to an empty line-up rather than storing a null value.
            self.STeamPlayers = LineUp(), TEXT
        self.insertToDB()

    def __str__(self):
        # Human-readable summary: team name, leader, then the full line-up.
        return self.getTeamName() + "\n\nTeam Leader:\t" + str(self.getTeamLeader()) + "\n\nLine Up:\n\n" +\
               str(self.getTeamPlayers())
# Module-level registry of stored LeagueTeam rows, exposed by DBStorable.
# (Repaired: this line had been fused with dataset separator text.)
LeagueTeams = LeagueTeam.storageDict
from bfassist.sql import *
from bfassist.master.league import LeaguePlayers, LeaguePlayer, LineUp
# noinspection PyUnusedLocal
def __preload__(forClient: bool = True):
pass
# noinspection PyUnusedLocal
def __postload__(forClient: bool = True):
pass
def pyPlayerToSQL(inPlayer: LeaguePlayer):
if inPlayer is not None:
return inPlayer.getKeyhash()
else:
return ""
def pyLineUpToSQL(inLineUp: LineUp):
return ";".join([keyhash.replace(';', '\\&r01') if keyhash else '' for keyhash in inLineUp.lineUp])
def sqlPlayerToPy(sql: str):
if sql in LeaguePlayers:
return LeaguePlayers[sql]
else:
return str(sql)
def sqlLineUpToPy(sql: str):
lineUp = LineUp()
for keyhash in set([x.replace('\\&r01', ';') for x in sql.split(';')]):
if keyhash in LeaguePlayers:
lineUp[keyhash] = LeaguePlayers[keyhash]
return lineUp
class LeagueTeam(DBStorable, table="teams", live=False):
""" Class that represents a team participating in a league season.
:param TeamName: The name of this team.
:param TeamLeader: The team leader of this team.
:param TeamPlayers: The league livePlayers of this team as line-up.
:param ActiveSeasons: Names of the seasons this team was active.
note:: Author(s): Mitch """
def __init__(self, TeamName: str, ActiveSeasons: set, TeamLeader: LeaguePlayer = None, TeamPlayers: LineUp = None):
if not self.initialised:
self.addConversions(
(pyPlayerToSQL, sqlPlayerToPy),
(pyLineUpToSQL, sqlLineUpToPy)
)
self.STeamName = TeamName, VARCHAR(32), PRIMARY_KEY
self.SActiveSeasons = ActiveSeasons, TINYTEXT
self.STeamLeader = TeamLeader, VARCHAR(32)
if TeamPlayers:
self.STeamPlayers = TeamPlayers, TEXT
else:
self.STeamPlayers = LineUp(), TEXT
self.insertToDB()
def __str__(self):
return self.getTeamName() + "\n\nTeam Leader:\t" + str(self.getTeamLeader()) + "\n\nLine Up:\n\n" +\
str(self.getTeamPlayers())
LeagueTeams = LeagueTeam.storageDict | 0.551815 | 0.143038 |
import os
from datetime import date
from django.core.management import setup_environ
try:
from scielomanager import settings
except ImportError:
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
BASE_PATH_APP = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'scielomanager'))
from sys import path
path.append(BASE_PATH)
path.append(BASE_PATH_APP)
import settings
setup_environ(settings)
from django.core import exceptions
from django.db.utils import DatabaseError, IntegrityError
from django.db import transaction
from django.db.models import Q
from django.contrib.auth.models import User
from journalmanager.models import *
import choices
logger = logging.getLogger(__name__)
def _config_logging(logging_level='INFO', logging_file=None):
    """Configure the module logger and return it.

    :param logging_level: level name ('DEBUG', 'INFO', 'WARNING', 'ERROR',
                          'CRITICAL'); unknown names fall back to INFO.
    :param logging_file: when given, append log records to this file instead
                         of logging to stderr.
    """
    allowed_levels = {
        'DEBUG': logging.DEBUG,
        'INFO': logging.INFO,
        'WARNING': logging.WARNING,
        'ERROR': logging.ERROR,
        'CRITICAL': logging.CRITICAL
    }

    # Fix: fall back to the numeric logging.INFO constant. The old code fell
    # back to the string 'INFO', which logging happens to accept but was
    # inconsistent with the mapping above.
    level = allowed_levels.get(logging_level, logging.INFO)

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    logger.setLevel(level)

    if logging_file:
        hl = logging.FileHandler(logging_file, mode='a')
    else:
        hl = logging.StreamHandler()

    hl.setFormatter(formatter)
    hl.setLevel(level)

    logger.addHandler(hl)

    return logger
class Catalog(object):
    """Importer that loads journals and issues (Xylose data objects) into
    the catalog database of a single collection.

    Each ``load_*`` entry point runs in its own transaction; database
    failures are logged and rolled back so one bad record does not abort
    the whole import run.
    """

    def __init__(self, collection, user=None):
        """
        collection -- acronym of an existing Collection
        user -- User that will own imported records (defaults to pk=1)

        Raises ValueError when no collection matches the acronym.
        """
        self.user = User.objects.get(pk=1) if user is None else user
        try:
            self.collection = Collection.objects.get(acronym=collection)
        except exceptions.ObjectDoesNotExist:
            # Narrowed from a bare ``except``: only a missing collection is
            # translated into ValueError; unexpected errors now propagate.
            raise ValueError('Collection do no exists: %s' % collection)

    def _load_language(self, language):
        """Return the Language for an ISO code, creating it if needed."""
        language = Language.objects.get_or_create(
            iso_code=language,
            name=choices.LANG_DICT.get(language, '###NOT FOUND###')
        )[0]
        return language

    def _load_journal_mission(self, journal, missions):
        """Attach one JournalMission per language to *journal*.

        missions -- mapping of ISO language code -> mission text, or None
        """
        if missions is None:
            return
        for language, description in missions.items():
            mission = JournalMission()
            mission.language = self._load_language(language)
            mission.description = description
            # NOTE(review): mission is added unsaved; relies on the related
            # manager persisting it — confirm against the model definition.
            journal.missions.add(mission)

    def _load_journal_subject_areas(self, journal, areas):
        """Associate existing StudyArea records with *journal*.

        Unknown areas are logged and skipped.
        """
        if areas is None:
            return
        for area in areas:
            try:
                studyarea = StudyArea.objects.get(study_area=area)
            except exceptions.ObjectDoesNotExist:
                logger.warning('Invalid study area (%s) for the journal (%s), nothing was assigned' % (
                    area, journal.title)
                )
                # Fix: previously execution fell through and either raised
                # NameError or re-added the previous iteration's studyarea.
                continue
            journal.study_areas.add(studyarea)

    def _load_journal_textlanguage(self, journal, languages):
        """Attach the journal's full-text languages."""
        if languages is None:
            return
        for language in languages:
            journal.languages.add(self._load_language(language))

    def _load_journal_abstractlanguage(self, journal, languages):
        """Attach the journal's abstract/keyword languages."""
        if languages is None:
            return
        for language in languages:
            journal.abstract_keyword_languages.add(self._load_language(language))

    def _load_journal_status_history(self, journal, status_history):
        """Rebuild the journal's status timeline for this collection.

        status_history -- iterable of (date, status, reason) tuples; partial
        dates (YYYY or YYYY-MM) are padded to a full ISO date.
        """
        # Cleanup before deploying the new status history.
        JournalTimeline.objects.filter(
            journal=journal,
            collection=self.collection
        ).delete()
        if status_history is None:
            return
        for st_date, status, reason in status_history:
            if len(st_date) == 4:
                st_date += '-01-01'
            if len(st_date) == 7:
                st_date += '-01'
            defaults = {
                'created_by': self.user,
            }
            try:
                JournalTimeline.objects.get_or_create(
                    journal=journal,
                    collection=self.collection,
                    since=st_date,
                    status=status,
                    reason=reason,
                    defaults=defaults)
            except exceptions.ValidationError:
                logger.warning('Invalid timeline (%s) for the journal (%s), nothing was assigned' % (
                    ', '.join([st_date, status, reason]), journal.title)
                )
            try:
                Membership.objects.get_or_create(
                    journal=journal,
                    collection=self.collection,
                    since=st_date,
                    status=status,
                    reason=reason,
                    defaults=defaults
                )
            except IntegrityError:
                logger.warning('Invalid membership (%s) for the journal (%s), nothing was assigned' % (
                    ', '.join([st_date, status, reason]), journal.title)
                )
            # Membership always replicates the saved record into
            # JournalTimeline. During import that behaviour is unwanted, so
            # the rows created by this run (detected by the execution date)
            # are deleted again.
            JournalTimeline.objects.filter(
                journal=journal,
                collection=self.collection,
                since__month=date.today().month,
                since__year=date.today().year).delete()

    def _load_journal_other_titles(self, journal, data):
        """Attach alternative titles (plus the NLM title) to *journal*."""
        for title_text in data.other_titles or []:
            # Fix: the loop variable used to be rebound to the JournalTitle
            # instance, so ``title.title = title`` stored the object itself
            # and the original title string was lost.
            other = JournalTitle()
            other.title = title_text
            other.category = 'other'
            journal.other_titles.add(other)
        # NLM/Medline Title
        if data.title_nlm:
            nlm = JournalTitle()
            nlm.title = data.title_nlm
            nlm.category = 'abbrev_nlm'
            journal.other_titles.add(nlm)

    def _load_journal_use_license(self, journal, permission):
        """Assign the journal's UseLicense, creating/updating it if needed.

        permission -- dict with 'id' and optional 'text'/'url' keys, or None
        """
        if permission is None:
            return
        use_license = UseLicense.objects.get_or_create(
            license_code=permission['id'].upper())[0]
        if permission.get('text'):
            use_license.disclaimer = permission['text']
        if permission.get('url'):
            use_license.reference_url = permission['url']
        use_license.save()
        journal.use_license = use_license

    def _load_journal_sponsor(self, journal, data):
        """Associate each sponsor (created if needed) with the journal and
        with this collection."""
        if data.sponsors is None:
            return
        for sponsor in data.sponsors:
            db_sponsor = Sponsor.objects.get_or_create(name=sponsor)[0]
            db_sponsor.collections.add(self.collection)
            db_sponsor.save()
            journal.sponsor.add(db_sponsor)

    def _load_journal_membership(self, journal):
        """Join the journal to this collection unless already a member."""
        if journal.is_member(self.collection):
            return
        journal.join(self.collection, self.user)

    def _post_save_journal(self, journal, data):
        """Load data that can only be associated with an already persisted
        journal (anything requiring a defined primary key)."""
        journal.created = data.creation_date or data.processing_date
        journal.updated = data.update_date
        self._load_journal_textlanguage(journal, data.languages)
        self._load_journal_abstractlanguage(journal, data.abstract_languages)
        self._load_journal_subject_areas(journal, data.subject_areas)
        self._load_journal_mission(journal, data.mission)
        self._load_journal_other_titles(journal, data)
        self._load_journal_status_history(journal, data.status_history)
        self._load_journal_use_license(journal, data.permissions)
        self._load_journal_sponsor(journal, data)
        self._load_journal_membership(journal)
        try:
            journal.save()
        except (DatabaseError, IntegrityError) as e:
            logger.error(e.message)
            transaction.rollback()

    @transaction.commit_on_success
    def load_journal(self, data):
        """Create the Journal for *data*, or return the existing one when
        any of its ISSNs already matches.

        Returns the Journal, or None when the insert fails.
        """
        issns = {data.scielo_issn, data.print_issn, data.electronic_issn}
        try:
            journal = Journal.objects.get(
                Q(print_issn__in=issns) |
                Q(eletronic_issn__in=issns))
            logger.info('Journal already exists, skiping journal creation')
            return journal
        except exceptions.ObjectDoesNotExist:
            logger.info('Journal do no exists, creating journal')
        logger.info('Importing Journal (%s)' % data.title)
        journal = Journal()
        journal.creator_id = self.user.pk
        journal.collection = self.collection
        journal.scielo_issn = 'electronic' if data.scielo_issn == data.electronic_issn else 'print'
        journal.print_issn = data.print_issn or ''
        journal.eletronic_issn = data.electronic_issn or ''
        journal.title = data.title or ''
        journal.title_iso = data.abbreviated_iso_title or ''
        journal.short_title = data.abbreviated_title or ''
        journal.medline_title = data.title_nlm or ''
        journal.acronym = data.acronym
        journal.subject_descriptors = '\n'.join(data.subject_descriptors or [])
        # NOTE(review): index_coverage reuses subject_descriptors — looks
        # like a copy/paste; confirm the intended source field.
        journal.index_coverage = '\n'.join(data.subject_descriptors or [])
        journal.copyrighter = data.copyrighter or ''
        journal.init_year = data.first_year or ''
        journal.init_vol = data.first_volume or ''
        journal.init_num = data.first_number or ''
        journal.final_year = data.last_year or ''
        journal.final_vol = data.last_volume or ''
        journal.final_num = data.last_number or ''
        journal.cnn_code = data.cnn_code or ''
        journal.frequency = data.periodicity[0] if data.periodicity else ''
        journal.url_online_submission = data.submission_url or ''
        journal.url_journal = data.institutional_url or data.url() or ''
        journal.pub_status = data.current_status or ''
        journal.editorial_standard = data.editorial_standard[0] if data.editorial_standard else ''
        journal.ctrl_vocabulary = data.controlled_vocabulary[0] if data.controlled_vocabulary else ''
        journal.pub_level = data.publication_level[0] if data.publication_level else ''
        journal.secs_code = data.secs_code or ''
        journal.publisher_name = '; '.join(data.publisher_name) if data.publisher_name else ''
        journal.publisher_country = data.publisher_country[0] if data.publisher_country else ''
        journal.publisher_state = data.publisher_state or ''
        journal.publisher_city = data.publisher_city or ''
        journal.editor_address = data.editor_address or ''
        journal.editor_email = data.editor_email or ''
        journal.is_indexed_scie = data.is_indexed_in_scie
        journal.is_indexed_ssci = data.is_indexed_in_ssci
        journal.is_indexed_aehci = data.is_indexed_in_ahci
        try:
            journal.save(force_insert=True)
        except (DatabaseError, IntegrityError) as e:
            logger.error(e.message)
            logger.error('Journal (%s) not imported' % (data.title))
            transaction.rollback()
            return
        self._post_save_journal(journal, data)
        logger.info('Journal (%s) created' % data.title)
        return journal

    def _load_issue_sections(self, issue, sections):
        """Attach (creating when needed) Sections and their per-language
        SectionTitles to *issue*.

        sections -- mapping of legacy code -> {language: title}, or None
        """
        if sections is None:
            return None
        for code, texts in sections.items():
            for language, text in texts.items():
                language = self._load_language(language)
                try:
                    section = Section.objects.get(
                        journal=issue.journal,
                        legacy_code=code,
                    )
                except exceptions.ObjectDoesNotExist:
                    section = Section()
                    section.legacy_code = code
                    section.journal = issue.journal
                    section.save(force_insert=True)
                # Deduplicated: the title creation used to be repeated in
                # both branches above.
                SectionTitle.objects.get_or_create(
                    section=section,
                    language=language,
                    title=text
                )
                issue.section.add(section)

    def _load_issue_titles(self, issue, titles):
        """Attach one IssueTitle per language to *issue*."""
        if titles is None:
            return None
        for language, title in titles.items():
            issuetitle = IssueTitle()
            issuetitle.title = title
            issuetitle.issue = issue
            issuetitle.language = self._load_language(language)
            issuetitle.save(force_insert=True)

    def _load_issue_use_license(self, issue, permission):
        """Assign the issue's UseLicense, creating/updating it if needed."""
        if permission is None:
            return None
        use_license = UseLicense.objects.get_or_create(
            license_code=permission['id'].upper())[0]
        if permission.get('text'):
            use_license.disclaimer = permission['text']
        if permission.get('url'):
            use_license.reference_url = permission['url']
        use_license.save()
        issue.use_license = use_license

    def _post_save_issue(self, issue, data):
        """Load data that requires an already persisted issue."""
        issue.order = int(data.order)
        issue.created = data.creation_date or data.processing_date
        issue.updated = data.update_date
        self._load_issue_titles(issue, data.titles)
        self._load_issue_sections(issue, data.sections)
        self._load_issue_use_license(issue, data.permissions)
        try:
            issue.save(auto_order=False)
        except (DatabaseError, IntegrityError) as e:
            # Fix: the IntegrityError branch used to call rollback() twice.
            logger.error(e.message)
            transaction.rollback()

    @staticmethod
    def _issue_spe_suppl(data):
        """Return the (spe_text, suppl_text) pair derived from *data*,
        with None for the variant that does not apply."""
        spe = data.number.replace('spe', '') if data.type == 'special' else None
        suppl = ' '.join([
            data.supplement_volume or '',
            data.supplement_number or ''
        ]).strip() if data.type == 'supplement' else None
        return spe, suppl

    def _issue_exists(self, journal, data):
        """Return the matching Issue of *journal*, or None if absent."""
        spe, suppl = self._issue_spe_suppl(data)
        try:
            issue = Issue.objects.get(
                journal=journal,
                publication_year=data.publication_date[:4],
                volume=data.volume or '',
                number=data.number or '',
                type=data.type,
                spe_text=spe,
                suppl_text=suppl)
            logger.info('Issue already exists, skiping issue creation')
            return issue
        except exceptions.ObjectDoesNotExist:
            logger.info('Issue do not exists, creating issue')
            return None

    @transaction.commit_on_success
    def load_issue(self, data):
        """Create or update the Issue for *data* (its journal is loaded
        first when necessary). Ahead-of-print and press-release issues are
        skipped."""
        if data.type == 'ahead' or data.type == 'pressrelease':
            logger.info('Issue (%s) will not be imported' % data.type)
            return
        journal = self.load_journal(data.journal)
        if not journal:
            return
        self._load_journal_status_history(journal, data.journal.status_history)
        logger.info('Importing Issue (%s)' % (data.label))
        try:
            issue = self._issue_exists(journal, data) or Issue()
        except exceptions.MultipleObjectsReturned:
            logger.error('Multiple issues found this new issue will not be created')
            transaction.rollback()
            return
        spe, suppl = self._issue_spe_suppl(data)
        issue.journal = journal
        issue.publication_year = data.publication_date[:4]
        issue.volume = data.volume or ''
        issue.number = data.number or ''
        issue.type = data.type
        if data.type == 'special':
            issue.number = data.number.replace('spe', '') if data.number else ''
            issue.spe_text = spe
        if data.type == 'supplement' and suppl:
            issue.suppl_text = suppl
        issue.is_press_release = data.is_press_release
        issue.total_documents = data.total_documents or 0
        issue.publication_start_month = data.start_month or 0
        issue.publication_end_month = data.end_month or 0
        issue.is_marked_up = data.is_marked_up
        issue.ctrl_vocabulary = data.controlled_vocabulary[0] if data.controlled_vocabulary else ''
        issue.editorial_standard = data.editorial_standard[0] if data.editorial_standard else ''
        try:
            issue.save(force_insert=True)
        except (DatabaseError, IntegrityError) as e:
            logger.error(e.message)
            logger.error('Issue (%s) not imported' % (data.label))
            transaction.rollback()
            return
        self._post_save_issue(issue, data)
        logger.info('Issue (%s) created' % (data.label))
from datetime import date
from django.core.management import setup_environ
try:
from scielomanager import settings
except ImportError:
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
BASE_PATH_APP = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'scielomanager'))
from sys import path
path.append(BASE_PATH)
path.append(BASE_PATH_APP)
import settings
setup_environ(settings)
from django.core import exceptions
from django.db.utils import DatabaseError, IntegrityError
from django.db import transaction
from django.db.models import Q
from django.contrib.auth.models import User
from journalmanager.models import *
import choices
logger = logging.getLogger(__name__)
def _config_logging(logging_level='INFO', logging_file=None):
allowed_levels = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger.setLevel(allowed_levels.get(logging_level, 'INFO'))
if logging_file:
hl = logging.FileHandler(logging_file, mode='a')
else:
hl = logging.StreamHandler()
hl.setFormatter(formatter)
hl.setLevel(allowed_levels.get(logging_level, 'INFO'))
logger.addHandler(hl)
return logger
class Catalog(object):
def __init__(self, collection, user=None):
"""
data must be a Xylose object
"""
self.user = User.objects.get(pk=1) if user is None else user
try:
self.collection = Collection.objects.get(acronym=collection)
except:
raise ValueError('Collection do no exists: %s' % collection)
def _load_language(self, language):
language = Language.objects.get_or_create(
iso_code=language,
name=choices.LANG_DICT.get(language, '###NOT FOUND###')
)[0]
return language
def _load_journal_mission(self, journal, missions):
if missions is None:
return
for language, description in missions.items():
mission = JournalMission()
language = self._load_language(language)
mission.language = language
mission.description = description
journal.missions.add(mission)
def _load_journal_subject_areas(self, journal, areas):
if areas is None:
return
for area in areas:
try:
studyarea = StudyArea.objects.get(study_area=area)
except:
logger.warning('Invalid study area (%s) for the journal (%s), nothing was assigned' % (
area, journal.title)
)
journal.study_areas.add(studyarea)
def _load_journal_textlanguage(self, journal, languages):
if languages is None:
return
for language in languages:
language = self._load_language(language)
journal.languages.add(language)
def _load_journal_abstractlanguage(self, journal, languages):
if languages is None:
return
for language in languages:
language = self._load_language(language)
journal.abstract_keyword_languages.add(language)
def _load_journal_status_history(self, journal, status_history):
## cleanup before deploy the new status history
timeline = JournalTimeline.objects.filter(
journal=journal,
collection=self.collection
).delete()
if status_history is None:
return
for st_date, status, reason in status_history:
if len(st_date) == 4:
st_date += '-01-01'
if len(st_date) == 7:
st_date += '-01'
defaults = {
'created_by': self.user,
}
try:
timeline = JournalTimeline.objects.get_or_create(
journal=journal,
collection=self.collection,
since=st_date,
status=status,
reason=reason,
defaults=defaults)[0]
except exceptions.ValidationError:
logger.warning('Invalid timeline (%s) for the journal (%s), nothing was assigned' % (
', '.join([st_date, status, reason]), journal.title)
)
try:
membership = Membership.objects.get_or_create(
journal=journal,
collection=self.collection,
since=st_date,
status=status,
reason=reason,
defaults=defaults
)
except IntegrityError:
logger.warning('Invalid membership (%s) for the journal (%s), nothing was assigned' % (
', '.join([st_date, status, reason]), journal.title)
)
"""
models.Membership sempre replica o registro salvo para o
JournalTimeline. No momento da importação esse comportamento é
indesejado, para contorná-lo é realizada a exclusão dos registros
inseridos verificando a data da execução da importação
"""
JournalTimeline.objects.filter(
journal=journal,
collection=self.collection,
since__month=date.today().month,
since__year=date.today().year).delete()
def _load_journal_other_titles(self, journal, data):
for title in data.other_titles or []:
title = JournalTitle()
title.title = title
title.category = 'other'
journal.other_titles.add(title)
# NLM/Medline Title
if data.title_nlm:
title = JournalTitle()
title.title = data.title_nlm
title.category = 'abbrev_nlm'
journal.other_titles.add(title)
def _load_journal_use_license(self, journal, permission):
if permission is None:
return
use_license = UseLicense.objects.get_or_create(
license_code=permission['id'].upper())[0]
if 'text' in permission and permission['text']:
use_license.disclaimer = permission['text']
if 'url' in permission and permission['url']:
use_license.reference_url = permission['url']
use_license.save()
journal.use_license = use_license
def _load_journal_sponsor(self, journal, data):
"""
Function: load_sponsor
Retorna um objeto Sponsor() caso a gravação do mesmo em banco de dados for concluida
"""
if data.sponsors is None:
return
for sponsor in data.sponsors:
db_sponsor = Sponsor.objects.get_or_create(name=sponsor)[0]
db_sponsor.collections.add(self.collection)
db_sponsor.save()
journal.sponsor.add(db_sponsor)
def _load_journal_membership(self, journal):
if journal.is_member(self.collection):
return
journal.join(self.collection, self.user)
def _post_save_journal(self, journal, data):
"""
Este método existe para dados que só podem ser associados a um
journal já persisitido, como por exemplo métodos que dependem da
existência de um PK definido.
"""
journal.created = data.creation_date or data.processing_date
journal.updated = data.update_date
self._load_journal_textlanguage(journal, data.languages)
self._load_journal_abstractlanguage(journal, data.abstract_languages)
self._load_journal_subject_areas(journal, data.subject_areas)
self._load_journal_mission(journal, data.mission)
self._load_journal_other_titles(journal, data)
self._load_journal_status_history(journal, data.status_history)
self._load_journal_use_license(journal, data.permissions)
self._load_journal_sponsor(journal, data)
self._load_journal_membership(journal)
try:
journal.save()
except DatabaseError as e:
logger.error(e.message)
transaction.rollback()
except IntegrityError as e:
logger.error(e.message)
transaction.rollback()
@transaction.commit_on_success
def load_journal(self, data):
issns = set()
issns.add(data.scielo_issn)
issns.add(data.print_issn)
issns.add(data.electronic_issn)
try:
journal = Journal.objects.get(
Q(print_issn__in=issns) |
Q(eletronic_issn__in=issns))
logger.info('Journal already exists, skiping journal creation')
return journal
except exceptions.ObjectDoesNotExist:
logger.info('Journal do no exists, creating journal')
logger.info('Importing Journal (%s)' % data.title)
journal = Journal()
journal.creator_id = self.user.pk
journal.collection = self.collection
journal.scielo_issn = 'electronic' if data.scielo_issn == data.electronic_issn else 'print'
journal.print_issn = data.print_issn or ''
journal.eletronic_issn = data.electronic_issn or ''
journal.title = data.title or ''
journal.title_iso = data.abbreviated_iso_title or ''
journal.short_title = data.abbreviated_title or ''
journal.medline_title = data.title_nlm or ''
journal.acronym = data.acronym
journal.subject_descriptors = '\n'.join(data.subject_descriptors or [])
journal.index_coverage = '\n'.join(data.subject_descriptors or [])
journal.copyrighter = data.copyrighter or ''
journal.init_year = data.first_year or ''
journal.init_vol = data.first_volume or ''
journal.init_num = data.first_number or ''
journal.final_year = data.last_year or ''
journal.final_vol = data.last_volume or ''
journal.final_num = data.last_number or ''
journal.cnn_code = data.cnn_code or ''
journal.frequency = data.periodicity[0] if data.periodicity else ''
journal.url_online_submission = data.submission_url or ''
journal.url_journal = data.institutional_url or data.url() or ''
journal.pub_status = data.current_status or ''
journal.editorial_standard = data.editorial_standard[0] if data.editorial_standard else ''
journal.ctrl_vocabulary = data.controlled_vocabulary[0] if data.controlled_vocabulary else ''
journal.pub_level = data.publication_level[0] if data.publication_level else ''
journal.secs_code = data.secs_code or ''
journal.publisher_name = '; '.join(data.publisher_name) if data.publisher_name else ''
journal.publisher_country = data.publisher_country[0] if data.publisher_country else ''
journal.publisher_state = data.publisher_state or ''
journal.publisher_city = data.publisher_city or ''
journal.editor_address = data.editor_address or ''
journal.editor_email = data.editor_email or ''
journal.is_indexed_scie = data.is_indexed_in_scie
journal.is_indexed_ssci = data.is_indexed_in_ssci
journal.is_indexed_aehci = data.is_indexed_in_ahci
try:
journal.save(force_insert=True)
except DatabaseError as e:
logger.error(e.message)
logger.error('Journal (%s) not imported' % (data.title))
transaction.rollback()
return
except IntegrityError as e:
logger.error(e.message)
logger.error('Journal (%s) not imported' % (data.title))
transaction.rollback()
return
self._post_save_journal(journal, data)
logger.info('Journal (%s) created' % data.title)
return journal
def _load_issue_sections(self, issue, sections):
if sections is None:
return None
for code, texts in sections.items():
for language, text in texts.items():
language = self._load_language(language)
try:
section = Section.objects.get(
journal=issue.journal,
legacy_code=code,
)
sectiontitle = SectionTitle.objects.get_or_create(
section=section,
language=language,
title=text
)
except exceptions.ObjectDoesNotExist:
section = Section()
section.legacy_code = code
section.journal = issue.journal
section.save(force_insert=True)
sectiontitle = SectionTitle.objects.get_or_create(
section=section,
language=language,
title=text
)
issue.section.add(section)
def _load_issue_titles(self, issue, titles):
if titles is None:
return None
for language, title in titles.items():
language = self._load_language(language)
issuetitle = IssueTitle()
issuetitle.title = title
issuetitle.issue = issue
issuetitle.language = language
issuetitle.save(force_insert=True)
def _load_issue_use_license(self, issue, permission):
if permission is None:
return None
use_license = UseLicense.objects.get_or_create(
license_code=permission['id'].upper())[0]
if 'text' in permission and permission['text']:
use_license.disclaimer = permission['text']
if 'url' in permission and permission['url']:
use_license.reference_url = permission['url']
use_license.save()
issue.use_license = use_license
def _post_save_issue(self, issue, data):
issue.order = int(data.order)
issue.created = data.creation_date or data.processing_date
issue.updated = data.update_date
self._load_issue_titles(issue, data.titles)
self._load_issue_sections(issue, data.sections)
self._load_issue_use_license(issue, data.permissions)
try:
issue.save(auto_order=False)
except DatabaseError as e:
logger.error(e.message)
transaction.rollback()
except IntegrityError as e:
transaction.rollback()
logger.error(e.message)
transaction.rollback()
def _issue_exists(self, journal, data):
spe = data.number.replace('spe', '') if data.type == 'special' else None
suppl = ' '.join([
data.supplement_volume or '',
data.supplement_number or ''
]).strip() if data.type == 'supplement' else None
try:
issue = Issue.objects.get(
journal=journal,
publication_year=data.publication_date[:4],
volume=data.volume or '',
number=data.number or '',
type=data.type,
spe_text=spe,
suppl_text=suppl)
logger.info('Issue already exists, skiping issue creation')
return issue
except exceptions.ObjectDoesNotExist:
logger.info('Issue do not exists, creating issue')
return None
@transaction.commit_on_success
def load_issue(self, data):
if data.type == 'ahead' or data.type == 'pressrelease':
logger.info('Issue (%s) will not be imported' % data.type)
return
journal = self.load_journal(data.journal)
if not journal:
return
self._load_journal_status_history(journal, data.journal.status_history)
logger.info('Importing Issue (%s)' % (data.label))
try:
issue = self._issue_exists(journal, data) or Issue()
except exceptions.MultipleObjectsReturned as e:
logger.error('Multiple issues found this new issue will not be created')
transaction.rollback()
return
spe = data.number.replace('spe', '') if data.type == 'special' else None
suppl = ' '.join([
data.supplement_volume or '',
data.supplement_number or ''
]).strip() if data.type == 'supplement' else None
issue.journal = journal
issue.publication_year = data.publication_date[:4]
issue.volume = data.volume or ''
issue.number = data.number or ''
issue.type = data.type
if data.type == 'special':
issue.number = data.number.replace('spe', '') if data.number else ''
issue.spe_text = spe
if data.type == 'supplement' and suppl:
issue.suppl_text = suppl
issue.is_press_release = data.is_press_release
issue.total_documents = data.total_documents or 0
issue.publication_start_month = data.start_month or 0
issue.publication_end_month = data.end_month or 0
issue.is_marked_up = data.is_marked_up
issue.ctrl_vocabulary = data.controlled_vocabulary[0] if data.controlled_vocabulary else ''
issue.editorial_standard = data.editorial_standard[0] if data.editorial_standard else ''
try:
issue.save(force_insert=True)
except DatabaseError as e:
logger.error(e.message)
logger.error('Issue (%s) not imported' % (data.label))
transaction.rollback()
return
except IntegrityError as e:
logger.error(e.message)
logger.error('Issue (%s) not imported' % (data.label))
transaction.rollback()
return
self._post_save_issue(issue, data)
logger.info('Issue (%s) created' % (data.label)) | 0.339061 | 0.084417 |
import copy
class Layer(object):
    """One finite-thickness layer: a medium plus a thickness and hooks.

    The medium is deep-copied so per-layer parameter changes never mutate
    a medium object shared with other layers.
    """

    def __init__(self, medium, thickness, name="Unnamed Layer"):
        """
        medium -- medium object (deep-copied)
        thickness -- layer's thickness
        name -- optional layer's name
        """
        self.thickness = thickness
        self.medium = copy.deepcopy(medium)
        self.name = name
        self.hooks = {
            'pre_update_frequency': []
        }

    def update_frequency(self, omega):
        """Run the pre-update hooks, then propagate omega to the medium."""
        for hook in self.hooks['pre_update_frequency']:
            hook(self)
        self.medium.update_frequency(omega)

    def register(self, hook_name):
        """Decorator factory: register the decorated function under
        *hook_name*.

        Raises ValueError for an unknown hook name.
        """
        if self.hooks.get(hook_name) is None:
            raise ValueError(f"Invalid hook name. Use one of : {','.join(self.hooks.keys())}")

        def decorator(func):
            self.hooks[hook_name].append(func)
            return func  # keep the decorated name bound to the function
        # Fix: the decorator was defined but never returned, so
        # ``@layer.register(...)`` raised TypeError (None is not callable).
        return decorator

    def __str__(self):
        # Fix: MEDIUM_TYPE was literal text inside the f-string, not
        # an interpolated expression.
        return f'{self.name} - {self.thickness}m of {self.medium.name} ({self.medium.MEDIUM_TYPE})'
class StochasticLayer(Layer):
    """A Layer whose thickness or one medium parameter is redrawn at random.

    Calling ``new_draw()`` draws a fresh sample from *pdf* and applies it to
    the stochastic parameter; ``reinit()`` restores the original value.
    """

    def __init__(self, medium, thickness, stochastic_param, pdf, name="Unnamed Layer"):
        """
        medium -- medium object
        thickness -- layer's thickness
        stochastic_param -- name of the stochastic parameter ('thickness'
                            or any declared medium parameter)
        pdf -- probability density function from which are drawn the random samples
        name -- optional layer's name

        Please note that the pdf is a **function handle** that must return
        a sample per call (and accepts no argument).

        Raises ValueError when *stochastic_param* is neither 'thickness'
        nor a parameter declared by the medium.
        """
        super().__init__(medium, thickness, name)
        self.stochastic_param = stochastic_param
        self.pdf = pdf
        # useful for type lookup and guards
        self.__medium_params = dict(self.medium.EXPECTED_PARAMS + self.medium.OPT_PARAMS)
        if self.stochastic_param == 'thickness':
            setattr(self, 'new_draw', self.__draw_thickness)
            self.initial_param_value = self.thickness
        elif self.stochastic_param in self.__medium_params.keys():
            self.initial_param_value = getattr(self.medium, self.stochastic_param)
            setattr(self, 'new_draw', self.__draw_medium_parameter)
        else:
            raise ValueError('Unable to draw a parameter undefined in the layer')

    def __draw_thickness(self):
        """Draw a new thickness and return the drawn value."""
        draw = float(self.pdf())
        self.thickness = draw
        return draw

    def __draw_medium_parameter(self):
        """Draw a new value for the medium parameter and return it.

        Raises TypeError when the sample does not match the declared type.
        """
        draw = self.pdf()
        expected_type = self.__medium_params[self.stochastic_param]
        # Fix: ``type(draw) == expected_type`` rejected subclasses of the
        # declared type; isinstance accepts them.
        if not isinstance(draw, expected_type):
            raise TypeError(f'Draw of type {type(draw)} but expected type {expected_type}')
        setattr(self.medium, self.stochastic_param, draw)
        # Reset omega so the medium is refreshed on the next
        # update_frequency call — presumably a cache invalidation; confirm.
        self.medium.omega = -1
        return draw

    def __str__(self):
        # Fix: MEDIUM_TYPE was literal text inside the f-string, not
        # an interpolated expression.
        return f'{self.name} - {self.thickness}m of {self.medium.name} ({self.medium.MEDIUM_TYPE})'

    def reinit(self):
        """Restore the stochastic parameter to its construction-time value."""
        if self.stochastic_param == 'thickness':
            self.thickness = self.initial_param_value
        else:
            setattr(self.medium, self.stochastic_param, self.initial_param_value)
import copy
class Layer(object):
def __init__(self, medium, thickness, name="Unnamed Layer"):
self.thickness = thickness
self.medium = copy.deepcopy(medium)
self.name = name
self.hooks = {
'pre_update_frequency': []
}
def update_frequency(self, omega):
for f in self.hooks['pre_update_frequency']:
f(self)
self.medium.update_frequency(omega)
def register(self, hook_name):
if self.hooks.get(hook_name) is None:
raise ValueError(f"Invalid hook name. Use one of : {','.join(self.hooks.keys())}")
def decorator(func):
self.hooks[hook_name].append(func)
return decorator
def __str__(self):
return f'{self.name} - {self.thickness}m of {self.medium.name} (self.medium.MEDIUM_TYPE)'
class StochasticLayer(Layer):
def __init__(self, medium, thickness, stochastic_param, pdf, name="Unnamed Layer"):
"""
medium -- medium object
thickness -- layer's thickness
stochastic_param -- name of the stochastic parameter
pdf -- probability density function from which are drawn the random samples
name -- optional layer's name
Please note that the pdf is a **function handle** that must return
a sample per call (and accepts no argument)
"""
super().__init__(medium, thickness, name)
self.stochastic_param = stochastic_param
self.pdf = pdf
# useful for type lookup and guards
self.__medium_params = dict(self.medium.EXPECTED_PARAMS+self.medium.OPT_PARAMS)
if self.stochastic_param == 'thickness':
setattr(self, 'new_draw', self.__draw_thickness)
self.initial_param_value = self.thickness
elif self.stochastic_param in self.__medium_params.keys():
self.initial_param_value = getattr(self.medium, self.stochastic_param)
setattr(self, 'new_draw', self.__draw_medium_parameter)
else:
raise ValueError('Unable to draw a parameter undefined in the layer')
def __draw_thickness(self):
draw = float(self.pdf())
self.thickness = draw
return draw
def __draw_medium_parameter(self):
draw = self.pdf()
expected_type = self.__medium_params[self.stochastic_param]
if type(draw) == expected_type:
setattr(self.medium, self.stochastic_param, draw)
self.medium.omega = -1
else:
raise TypeError(f'Draw of type {type(draw)} but expected type {expected_type}')
return draw
def __str__(self):
return f'{self.name} - {self.thickness}m of {self.medium.name} (self.medium.MEDIUM_TYPE)'
def reinit(self):
if self.stochastic_param == 'thickness':
self.thickness = self.initial_param_value
else:
setattr(self.medium, self.stochastic_param, self.initial_param_value) | 0.812682 | 0.317347 |
from Canvas import *
from math import *
def split_cities(filename):
    """Read a city file and return a list of city dictionaries.

    Each non-blank line must hold three whitespace-separated fields:
    x-coordinate, y-coordinate and the city name. Returns a list of
    dicts with keys "x" (int), "y" (int) and "name" (str).

    Fixes over the original: the file is closed even on parse errors
    (``with``); a file without a trailing newline no longer loses the
    last character of the final city's name (the old ``line[:-1]``
    chopped it); blank lines are skipped instead of raising IndexError.
    """
    cities = []
    with open(filename, "r") as f:
        for line in f:
            fields = line.split()
            if not fields:
                continue  # tolerate blank lines
            cities.append({"x": int(fields[0]),
                           "y": int(fields[1]),
                           "name": fields[2]})
    return cities
def draw(cities, length):
    """Render the tour: plot every city and connect consecutive cities
    (including the closing edge back to the start), then print the length.

    Side-effect order matches the original: each city is plotted just before
    the line from its predecessor is drawn.
    """
    plot_points(cities[0])
    prev = cities[0]
    for cur in cities[1:]:
        plot_points(cur)
        create_line(prev["x"], prev["y"], cur["x"], cur["y"])
        prev = cur
    # Close the loop back to the starting city.
    start = cities[0]
    create_line(prev["x"], prev["y"], start["x"], start["y"])
    create_text(200, 220, text="Tour length = %d" % length)
def plot_points(city):
    """Draw a small circle for *city* on the canvas and label it with its name."""
    cx, cy = city["x"], city["y"]
    create_oval(cx - 3, cy - 3, cx + 3, cy + 3)
    create_text(cx - 3, cy + 3, text=city["name"])
def nearest_city(cities, i):
    """Return the index (> i) of the city closest to cities[i].

    Scans candidates i+1 .. len(cities)-1, keeping the minimum distance,
    like a linear arg-min search.
    """
    base = cities[i]
    best_index = i + 1
    best_dist = distance(base, cities[best_index])
    for j in range(i + 2, len(cities)):
        cand = distance(base, cities[j])
        if cand < best_dist:
            best_dist = cand
            best_index = j
    return best_index
def tour_order(cities):
    """Order *cities* in place with the greedy nearest-neighbour heuristic.

    For each position, swap the nearest remaining city into the next slot.
    """
    for pos in range(len(cities) - 2):
        nxt = nearest_city(cities, pos)
        cities[pos + 1], cities[nxt] = cities[nxt], cities[pos + 1]
def length_tour(cities):
    """Return the total length of the closed tour visiting *cities* in order."""
    # Sum the consecutive legs, then add the closing edge back to the start.
    total = sum(distance(a, b) for a, b in zip(cities, cities[1:]))
    return total + distance(cities[-1], cities[0])
def distance(city1, city2):
    """Integer-truncated Euclidean distance between two city dicts."""
    dx = city1["x"] - city2["x"]
    dy = city1["y"] - city2["y"]
    return int(sqrt(dx * dx + dy * dy))
# Script entry point: load the city list from disk, order it with the greedy
# nearest-neighbour heuristic, then draw the resulting tour on the canvas.
cities = split_cities("Cities.txt")
tour_order(cities)
draw(cities, length_tour(cities))
complete() | nearestneighbour.py | from Canvas import *
from math import *
def split_cities(filename):
# Reading in a file and parsing the data before storing each specific part of the data in a list of dictionaries.
city = []
f = open(filename, "r")
line = f.readline()
while line != "":
line = line[:-1]
splitlines = line.split()
city = city + [{"x": int(splitlines[0]),
"y": int(splitlines[1]),
"name": splitlines[2]}]
line = f.readline()
f.close()
return city
def draw(cities, length):
# Iterating through each city in the array and finding the corresponding x and y co-ordinate
# The co-ordinates are past as a parameter to the plot points function which draws a point for the city
# Then a line is created from the city just drawn to the next city in the array in order to show the path taken.
city = cities[0]
plot_points(city)
x = city["x"]
y = city["y"]
i = 1
while i < len(cities):
city1 = cities[i]
plot_points(city1)
x1 = city1["x"]
y1 = city1["y"]
create_line(x, y, x1, y1)
x = x1
y = y1
i = i + 1
city1 = cities[0]
x1 = city1["x"]
y1 = city1["y"]
create_line(x, y, x1, y1)
create_text(200, 220, text="Tour length = %d" % length)
def plot_points(city):
# Defining how to plot a point for cities on a canvas.
x = city["x"]
y = city["y"]
name = city["name"]
create_oval(x-3, y-3, x+3, y+3)
create_text(x-3, y+3, text=name)
def nearest_city(cities, i):
# Algorithm to find the next nearest city
# Like a min/max algorithm, iterates through all remaining cities comparing one to the next and storing the nearest.
city = cities[i]
nearest = i + 1
d = distance(city, cities[i+1])
i = i + 1
while i < len(cities) - 1:
newd = distance(city, cities[i+1])
if newd < d:
d = newd
nearest = i + 1
i = i + 1
return nearest
def tour_order(cities):
i = 0
while i < len(cities) - 2:
second = nearest_city(cities, i)
temp = cities[i+1]
cities[i+1] = cities[second]
cities[second] = temp
i = i + 1
def length_tour(cities):
# Iterating through each city to calculate the total length of the tour
# Calling the function distance inside this function to do so.
length = 0
i = 0
while i < len(cities)-1:
length = length + distance(cities[i], cities[i+1])
i = i + 1
length = length + distance(cities[len(cities)-1], cities[0])
return length
def distance(city1, city2):
# Calculating the distance between two cities
x1 = city1["x"]
y1 = city1["y"]
x2 = city2["x"]
y2 = city2["y"]
return int(sqrt((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2)))
cities = split_cities("Cities.txt")
tour_order(cities)
draw(cities, length_tour(cities))
complete() | 0.532425 | 0.515986 |
import os
from PyQt5.QtGui import QIcon, QCloseEvent
from PyQt5.QtWidgets import QDesktopWidget, QMainWindow, QStackedWidget, QFileDialog, QWidget
from src.controller_factory import ControllerFactory
from src.message_type import MessageType
from src.rocket_packet.rocket_packet_parser_factory import RocketPacketVersionException
from src.ui import utils
from src.ui.configdialog import ConfigDialog
from src.ui.console_message_listener import ConsoleMessageListener
from src.ui.homewidget import HomeWidget
from src.ui.menu_bar import MenuBar
from src.ui.real_time_widget import RealTimeWidget
from src.ui.replay_widget import ReplayWidget
from src.ui.status_bar import StatusBar
from src.ui.tabs_widget import TabsWidget
from src.ui.motor_widget import MotorWidget
class MainWindow(QMainWindow):
    """Top-level window of the GAUL BaseStation application.

    Owns a stacked central widget that switches between the home screen and a
    tabbed data view, and lazily creates the controllers for the two operating
    modes (real-time acquisition and replay of recorded flight data).
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        # Stacked widget: one page is the home screen, the other the tabbed data view.
        self.central_widget = QStackedWidget()
        self.setCentralWidget(self.central_widget)
        self.home_widget = HomeWidget(self.new_acquisition, self.load_flight_data, self)
        self.real_time_widget = RealTimeWidget(self)
        self.replay_widget = ReplayWidget(self)
        self.motor_widget = MotorWidget(self)
        self.tab_widget = TabsWidget(self)
        self.central_widget.addWidget(self.home_widget)
        self.central_widget.addWidget(self.tab_widget)
        self.controller_factory = ControllerFactory()
        # Controllers are created on demand in open_real_time()/open_replay().
        self.active_controller = None
        self.real_time_controller = None
        self.replay_controller = None
        self.status_bar = StatusBar(self)
        self.setStatusBar(self.status_bar)
        self.menu_bar = self.create_menu_bar()
        # Configuration dialog is created lazily in open_preferences().
        self.config_dialog = None
        self.console = ConsoleMessageListener()
        self.setWindowIcon(QIcon("src/resources/logo.jpg"))
        self.setWindowTitle("GAUL BaseStation")
        self.set_stylesheet("src/resources/mainwindow.css")

    def create_menu_bar(self):
        """Build the application menu bar, wire its callbacks and install it."""
        menu_bar = MenuBar(self)
        menu_bar.set_new_acquisition_callback(self.new_acquisition)
        menu_bar.set_save_as_callback(self.save_as)
        menu_bar.set_load_flight_data_callback(self.load_flight_data)
        menu_bar.set_open_simulation_callback(self.add_simulation)
        menu_bar.set_edit_preferences_callback(self.open_preferences)
        menu_bar.set_on_exit_callback(self.close)
        self.setMenuBar(menu_bar)
        return menu_bar

    def new_acquisition(self):
        """Switch to real-time mode, deactivating the current controller first.

        Does nothing if the active controller refuses to deactivate; shows a
        status-bar error on rocket-packet version mismatch.
        """
        deactivated = True
        if self.active_controller is not None:
            deactivated = self.active_controller.deactivate()
        if deactivated:
            try:
                self.open_real_time()
                self.active_controller.activate("")
            except RocketPacketVersionException as error:
                self.status_bar.notify(str(error), MessageType.ERROR)

    def save_as(self):  # TODO: not implemented yet
        pass

    def load_flight_data(self):
        """Prompt for a recorded flight file and open it in replay mode."""
        filename, _ = QFileDialog.getOpenFileName(caption="Open File", directory="./src/resources/",
                                                  filter="All Files (*);; CSV Files (*.csv)")
        if filename:
            deactivated = True
            if self.active_controller is not None:
                deactivated = self.active_controller.deactivate()
            if deactivated:
                self.open_replay()
                self.active_controller.activate(filename)

    def add_simulation(self):
        """Prompt for an OpenRocket simulation file and add it to the active view."""
        filename, _ = QFileDialog.getOpenFileName(caption="Open File", directory="./src/resources/",
                                                  filter="All Files (*);; CSV Files (*.csv)")
        if filename:
            # NOTE(review): assumes a controller is already active; confirm the
            # menu entry is unavailable before an acquisition/replay is opened.
            self.active_controller.add_open_rocket_simulation(filename)

    def open_preferences(self):
        """Show the (lazily created) configuration dialog for ./config.ini."""
        config_path = os.path.join(os.getcwd(), "config.ini")
        if self.config_dialog is None:
            self.config_dialog = ConfigDialog(self)
        self.config_dialog.open(config_path)

    def open_real_time(self):
        """Create or reuse the real-time controller and show the real-time tabs."""
        if self.real_time_controller is None:
            self.real_time_controller = self.controller_factory.create_real_time_controller(self.real_time_widget,
                                                                                            self.motor_widget,
                                                                                            self.console)
            self.real_time_controller.register_message_listener(self.status_bar)
        self.active_controller = self.real_time_controller
        self.tab_widget.clearTabs()
        # NOTE(review): unlike open_replay(), no "Motor" tab is added here —
        # confirm this asymmetry is intentional.
        self.tab_widget.addWidget(self.real_time_widget, "General")
        self.open_widget(self.tab_widget)
        self.menu_bar.set_real_time_mode()

    def open_replay(self):
        """Create or reuse the replay controller and show the replay tabs."""
        if self.replay_controller is None:
            self.replay_controller = self.controller_factory.create_replay_controller(self.replay_widget,
                                                                                      self.motor_widget)
            self.replay_controller.register_message_listener(self.status_bar)
        self.active_controller = self.replay_controller
        self.tab_widget.clearTabs()
        self.tab_widget.addWidget(self.replay_widget, "General")
        self.tab_widget.addWidget(self.motor_widget, "Motor")
        self.open_widget(self.tab_widget)
        self.menu_bar.set_replay_mode()

    def open_widget(self, widget: QWidget):
        """Make *widget* the visible central page and maximise the window."""
        self.central_widget.setCurrentWidget(widget)
        self.set_stylesheet("src/resources/data_widget.css")
        self.showMaximized()
        self.status_bar.clear()

    def set_stylesheet(self, stylesheet_path):
        """Load the CSS file at *stylesheet_path* and apply it to this window."""
        stylesheet = utils.read_stylesheet(stylesheet_path)
        self.setStyleSheet(stylesheet)

    def closeEvent(self, event: QCloseEvent):
        """Qt close hook: let the active controller handle (or veto) shutdown."""
        if self.active_controller is not None:
            self.active_controller.on_close(event)
        else:
            event.accept()

    def center(self):
        """Move the window so its frame is centred on the current screen."""
        window_geometry = self.frameGeometry()
        screen_center_point = QDesktopWidget().screenGeometry(self).center()
        window_geometry.moveCenter(screen_center_point)
self.move(window_geometry.topLeft()) | BaseStation/src/ui/mainwindow.py | import os
from PyQt5.QtGui import QIcon, QCloseEvent
from PyQt5.QtWidgets import QDesktopWidget, QMainWindow, QStackedWidget, QFileDialog, QWidget
from src.controller_factory import ControllerFactory
from src.message_type import MessageType
from src.rocket_packet.rocket_packet_parser_factory import RocketPacketVersionException
from src.ui import utils
from src.ui.configdialog import ConfigDialog
from src.ui.console_message_listener import ConsoleMessageListener
from src.ui.homewidget import HomeWidget
from src.ui.menu_bar import MenuBar
from src.ui.real_time_widget import RealTimeWidget
from src.ui.replay_widget import ReplayWidget
from src.ui.status_bar import StatusBar
from src.ui.tabs_widget import TabsWidget
from src.ui.motor_widget import MotorWidget
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.central_widget = QStackedWidget()
self.setCentralWidget(self.central_widget)
self.home_widget = HomeWidget(self.new_acquisition, self.load_flight_data, self)
self.real_time_widget = RealTimeWidget(self)
self.replay_widget = ReplayWidget(self)
self.motor_widget = MotorWidget(self)
self.tab_widget = TabsWidget(self)
self.central_widget.addWidget(self.home_widget)
self.central_widget.addWidget(self.tab_widget)
self.controller_factory = ControllerFactory()
self.active_controller = None
self.real_time_controller = None
self.replay_controller = None
self.status_bar = StatusBar(self)
self.setStatusBar(self.status_bar)
self.menu_bar = self.create_menu_bar()
self.config_dialog = None
self.console = ConsoleMessageListener()
self.setWindowIcon(QIcon("src/resources/logo.jpg"))
self.setWindowTitle("GAUL BaseStation")
self.set_stylesheet("src/resources/mainwindow.css")
def create_menu_bar(self):
menu_bar = MenuBar(self)
menu_bar.set_new_acquisition_callback(self.new_acquisition)
menu_bar.set_save_as_callback(self.save_as)
menu_bar.set_load_flight_data_callback(self.load_flight_data)
menu_bar.set_open_simulation_callback(self.add_simulation)
menu_bar.set_edit_preferences_callback(self.open_preferences)
menu_bar.set_on_exit_callback(self.close)
self.setMenuBar(menu_bar)
return menu_bar
def new_acquisition(self):
deactivated = True
if self.active_controller is not None:
deactivated = self.active_controller.deactivate()
if deactivated:
try:
self.open_real_time()
self.active_controller.activate("")
except RocketPacketVersionException as error:
self.status_bar.notify(str(error), MessageType.ERROR)
def save_as(self): # TODO
pass
def load_flight_data(self):
filename, _ = QFileDialog.getOpenFileName(caption="Open File", directory="./src/resources/",
filter="All Files (*);; CSV Files (*.csv)")
if filename:
deactivated = True
if self.active_controller is not None:
deactivated = self.active_controller.deactivate()
if deactivated:
self.open_replay()
self.active_controller.activate(filename)
def add_simulation(self):
filename, _ = QFileDialog.getOpenFileName(caption="Open File", directory="./src/resources/",
filter="All Files (*);; CSV Files (*.csv)")
if filename:
self.active_controller.add_open_rocket_simulation(filename)
def open_preferences(self):
config_path = os.path.join(os.getcwd(), "config.ini")
if self.config_dialog is None:
self.config_dialog = ConfigDialog(self)
self.config_dialog.open(config_path)
def open_real_time(self):
if self.real_time_controller is None:
self.real_time_controller = self.controller_factory.create_real_time_controller(self.real_time_widget,
self.motor_widget,
self.console)
self.real_time_controller.register_message_listener(self.status_bar)
self.active_controller = self.real_time_controller
self.tab_widget.clearTabs()
self.tab_widget.addWidget(self.real_time_widget, "General")
self.open_widget(self.tab_widget)
self.menu_bar.set_real_time_mode()
def open_replay(self):
if self.replay_controller is None:
self.replay_controller = self.controller_factory.create_replay_controller(self.replay_widget,
self.motor_widget)
self.replay_controller.register_message_listener(self.status_bar)
self.active_controller = self.replay_controller
self.tab_widget.clearTabs()
self.tab_widget.addWidget(self.replay_widget, "General")
self.tab_widget.addWidget(self.motor_widget, "Motor")
self.open_widget(self.tab_widget)
self.menu_bar.set_replay_mode()
def open_widget(self, widget: QWidget):
self.central_widget.setCurrentWidget(widget)
self.set_stylesheet("src/resources/data_widget.css")
self.showMaximized()
self.status_bar.clear()
def set_stylesheet(self, stylesheet_path):
stylesheet = utils.read_stylesheet(stylesheet_path)
self.setStyleSheet(stylesheet)
def closeEvent(self, event: QCloseEvent):
if self.active_controller is not None:
self.active_controller.on_close(event)
else:
event.accept()
def center(self):
window_geometry = self.frameGeometry()
screen_center_point = QDesktopWidget().screenGeometry(self).center()
window_geometry.moveCenter(screen_center_point)
self.move(window_geometry.topLeft()) | 0.212968 | 0.060863 |
import unittest
import webtest
import json
from models.questions import QuestionRepository
from handlers.items.items_json import ItemsJSON
from handlers.items.new_item import NewItem
from handlers.items.add_more import AddMore
from handlers.items.update import Update
from handlers.helpers import Ready
import main
class ItemsTestCase(unittest.TestCase):
    """End-to-end tests of the items JSON API: listing, pagination by id,
    incremental sync by timestamp, posting new items and server-side updates.
    """

    TEST_ITEM = "Content for question "
    HEADER_FOR_ITEM = "Header for question "
    NUMBER_OF_QUESTIONS = 100
    PAGE_SIZE = 40

    def retrieve_first_n_items(self, number_of_items):
        """Fetch the *number_of_items* most recent items (the 'old' list)."""
        response = self.testapp.get('%s?n=%d' % (ItemsJSON.URL, number_of_items))
        self.assertEqual(response.status_int, 200)
        items = response.json['old']
        self.assertIsNotNone(items)
        return items

    def retrieve_first_n_items_before_item_with_id(self, item_id, number_of_items):
        """Fetch up to *number_of_items* items older than the item *item_id*."""
        response = self.testapp.get("%s?before=%d&n=%d" % (ItemsJSON.URL, item_id, number_of_items))
        self.assertEqual(response.status_int, 200)
        items = response.json['old']
        self.assertIsNotNone(items)
        return items

    def retrieve_first_n_items_updated_after_given_timestamp(self, n, timestamp):
        """Fetch up to *n* items created or updated after *timestamp* ('new' list)."""
        # Bug fix: the query string had been corrupted to "%s?n=%d×tamp=%f" by an
        # HTML "&times" entity substitution; the parameter name is "timestamp".
        response = self.testapp.get("%s?n=%d&timestamp=%f" % (ItemsJSON.URL, n, timestamp))
        self.assertEqual(response.status_int, 200)
        items = response.json['new']
        self.assertIsNotNone(items)
        return items

    def retrieve_all_items(self):
        """Fetch every stored item (the 'old' list, newest first)."""
        response = self.testapp.get(ItemsJSON.URL)
        self.assertEqual(response.status_int, 200)
        items = response.json['old']
        self.assertIsNotNone(items)
        return items

    def add_more_items(self, number_of_items=None):
        """Ask the server to generate more items (server default count when None)."""
        if not number_of_items:
            response = self.testapp.get(AddMore.URL)
        else:
            response = self.testapp.get('%s?n=%d' % (AddMore.URL, number_of_items))
        self.assertEqual(response.status_int, 200)
        self.assertEqual(response.body, "OK")

    def update_items(self, items_ids):
        """Touch the items in *items_ids* via the update endpoint (repeated id= params)."""
        request_url = '%s?' % Update.URL
        param_separator = ''
        for item_id in items_ids:
            request_url += '%sid=%d' % (param_separator, item_id)
            param_separator = '&'
        response = self.testapp.get(request_url)
        self.assertEqual(response.status_int, 200)
        self.assertEqual(response.body, "OK")

    def setUp(self):
        """Start every test from an empty questions table and a fresh test app."""
        QuestionRepository.create_table()
        QuestionRepository.truncate()
        self.testapp = webtest.TestApp(main.app)

    def test_checking_that_app_is_ready(self):
        response = self.testapp.get(Ready.URL)
        self.assertEqual(response.status_int, 200)
        self.assertEqual(response.body, "OK")

    def test_each_question_has_unique_id(self):
        QuestionRepository.populate(2)
        items = self.retrieve_all_items()
        self.assertEqual(len(items), 2)
        self.assertNotEqual(items[0][u'id'], items[1][u'id'])

    def test_each_question_has_timestamp_encoded_as_float(self):
        QuestionRepository.populate(2)
        items = self.retrieve_all_items()
        # (an exact duplicate of the next assertion was removed)
        self.assertEqual(len(items), 2)
        # Newest item first, so its creation timestamp is the larger one.
        self.assertGreater(items[0][u'created'],
                           items[1][u'created'])

    def test_each_question_has_updated_field_that_is_equal_to_timestamp_field(self):
        QuestionRepository.populate(1)
        items = self.retrieve_all_items()
        # (an exact duplicate of the next assertion was removed)
        self.assertEqual(len(items), 1)
        self.assertEqual(items[0][u'created'],
                         items[0][u'updated'])

    def test_retrieving_items_using_json(self):
        QuestionRepository.populate(5)
        items = self.retrieve_all_items()
        # (an exact duplicate of the next assertion was removed)
        self.assertEqual(len(items), 5)
        # Items come back newest first: index 0 holds question 4, index 4 question 0.
        for record_index, record in enumerate(items):
            self.assertEqual(record[u'content'], "%s%d" % (self.TEST_ITEM, 5 - record_index - 1),
                             "record_index:%d, record:%s" % (record_index, record))
            self.assertEqual(record[u'header'], "%s%d" % (self.HEADER_FOR_ITEM, 5 - record_index - 1),
                             "record_index:%d, record:%s" % (record_index, record))

    def test_retrieving_first_n_items_posted_before_item_with_given_id(self):
        QuestionRepository.populate(15)
        items = self.retrieve_first_n_items(5)
        self.assertEqual(len(items), 5)
        oldest_item_id = items[4][u'id']
        self.assertEqual('11', oldest_item_id)
        items = self.retrieve_first_n_items_before_item_with_id(int(oldest_item_id), 5)
        self.assertEqual(len(items), 5)
        self.assertEqual('10', items[0][u'id'])
        self.assertEqual('6', items[4][u'id'])

    def test_retrieving_first_n_items(self):
        QuestionRepository.populate(20)
        items = self.retrieve_first_n_items(10)
        self.assertEqual(len(items), 10)

    def test_retrieving_first_n_items_when_number_of_available_items_less_than_n(self):
        QuestionRepository.populate(5)
        items = self.retrieve_first_n_items(10)
        self.assertEqual(len(items), 5)

    def test_posting_new_item_using_json(self):
        json_item = [{'header': 'new item header', 'content': 'new item'}]
        response = self.testapp.post(NewItem.URL, json.dumps(json_item),
                                     {'Content-Type': 'application/json'})
        self.assertEqual(response.status_int, 200)
        items = self.retrieve_all_items()
        self.assertEqual(len(items), 1)
        item = items[0]
        self.assertEqual(item[u'content'], "new item")
        self.assertEqual(item[u'header'], "new item header")

    def test_retrieving_first_n_items_posted_after_item_with_given_id(self):
        QuestionRepository.populate(10)
        items = self.retrieve_first_n_items(5)
        most_recent_item_id = items[0][u'id']
        most_recent_item_updated = items[0][u'updated']
        self.assertEqual('10', most_recent_item_id)
        oldest_item_id = items[4][u'id']
        self.assertEqual('6', oldest_item_id)
        number_of_new_items = 5
        QuestionRepository.populate(number_of_new_items)
        new_items = self.retrieve_first_n_items_updated_after_given_timestamp(number_of_new_items,
                                                                              most_recent_item_updated)
        self.assertEqual(len(new_items), number_of_new_items)
        self.assertEqual('11', new_items[0][u'id'])
        self.assertEqual('15', new_items[4][u'id'])

    def test_retrieving_updated_items(self):
        QuestionRepository.populate(10)
        items = self.retrieve_first_n_items(5)
        most_recent_item_id = items[0][u'id']
        most_recent_item_updated = items[0][u'updated']
        self.assertEqual('10', most_recent_item_id)
        oldest_item_id = items[4][u'id']
        self.assertEqual('6', oldest_item_id)
        number_of_new_items = 5
        QuestionRepository.populate(number_of_new_items)
        list_of_items_to_be_updated = [6, 8, 10]
        QuestionRepository.update_items(list_of_items_to_be_updated)
        new_items = self.retrieve_first_n_items_updated_after_given_timestamp(number_of_new_items,
                                                                              most_recent_item_updated)
        self.assertEqual(len(new_items), number_of_new_items)
        self.assertEqual('11', new_items[0][u'id'])
        self.assertEqual('15', new_items[4][u'id'])
        new_items = self.retrieve_first_n_items_updated_after_given_timestamp(len(list_of_items_to_be_updated),
                                                                              new_items[4][u'updated'])
        self.assertEqual(len(new_items), len(list_of_items_to_be_updated))
        self.assertIn(int(new_items[0][u'id']), list_of_items_to_be_updated)
        self.assertIn(int(new_items[1][u'id']), list_of_items_to_be_updated)
        self.assertIn(int(new_items[2][u'id']), list_of_items_to_be_updated)

    def test_retrieving_updated_items_when_no_new_items_have_been_added(self):
        QuestionRepository.populate(10)
        items = self.retrieve_first_n_items(5)
        most_recent_item_updated = items[0][u'updated']
        most_recent_item_id = items[0][u'id']
        self.assertEqual('10', most_recent_item_id)
        oldest_item_id = items[4][u'id']
        self.assertEqual('6', oldest_item_id)
        list_of_items_to_be_updated = [6, 8, 10]
        QuestionRepository.update_items(list_of_items_to_be_updated)
        new_items = self.retrieve_first_n_items_updated_after_given_timestamp(5,
                                                                              most_recent_item_updated)
        self.assertEqual(len(new_items), len(list_of_items_to_be_updated))
        self.assertIn(int(new_items[0][u'id']), list_of_items_to_be_updated)
        self.assertIn(int(new_items[1][u'id']), list_of_items_to_be_updated)
        self.assertIn(int(new_items[2][u'id']), list_of_items_to_be_updated)

    def test_adding_an_answer_to_a_question(self):
        QuestionRepository.populate(2)
        items = self.retrieve_all_items()
        self.assertEqual(len(items), 2)
        item = items[0]
        self.assertIsNone(item[u'answer'])
        QuestionRepository.update_item(item[u'id'], item[u'header'], item[u'content'], "New Answer")
        items = self.retrieve_all_items()
        item = items[0]
        self.assertIsNotNone(item[u'answer'])
        self.assertEqual("New Answer", item[u'answer'])
        # Answering a question must bump its 'updated' timestamp.
        self.assertGreater(item[u'updated'], item[u'created'])

    def test_adding_more_items_to_the_database_using_add_more_get_request_with_default_number_of_items(self):
        QuestionRepository.populate(10)
        items = self.retrieve_all_items()
        self.assertEqual(len(items), 10)
        item = items[0]
        self.assertEqual('10', item[u'id'])
        self.add_more_items()
        items = self.retrieve_all_items()
        self.assertEqual(len(items), 20)
        item = items[0]
        self.assertEqual('20', item[u'id'])

    def test_adding_more_items_to_the_database_using_add_more_get_request_with_explicit_number_of_items(self):
        QuestionRepository.populate(10)
        items = self.retrieve_all_items()
        self.assertEqual(len(items), 10)
        item = items[0]
        self.assertEqual('10', item[u'id'])
        self.add_more_items(50)
        items = self.retrieve_all_items()
        self.assertEqual(len(items), 60)
        item = items[0]
        self.assertEqual('60', item[u'id'])

    def test_updating_selected_items_using_get_request(self):
        QuestionRepository.populate(5)
        updated = ItemsJSON.timestamp_to_float(QuestionRepository.all().get().updated)
        self.update_items([5, 3, 1])
        new_items = self.retrieve_first_n_items_updated_after_given_timestamp(5, updated)
self.assertEqual(3, len(new_items)) | Support/GoogleAppEngineAppMock/handlers/items/tests/test_items.py | import unittest
import webtest
import json
from models.questions import QuestionRepository
from handlers.items.items_json import ItemsJSON
from handlers.items.new_item import NewItem
from handlers.items.add_more import AddMore
from handlers.items.update import Update
from handlers.helpers import Ready
import main
class ItemsTestCase(unittest.TestCase):
TEST_ITEM = "Content for question "
HEADER_FOR_ITEM = "Header for question "
NUMBER_OF_QUESTIONS = 100
PAGE_SIZE = 40
def retrieve_first_n_items(self, number_of_items):
response = self.testapp.get('%s?n=%d' % (ItemsJSON.URL, number_of_items))
self.assertEqual(response.status_int, 200)
items = response.json['old']
self.assertIsNotNone(items)
return items
def retrieve_first_n_items_before_item_with_id(self, item_id, number_of_items):
response = self.testapp.get("%s?before=%d&n=%d" % (ItemsJSON.URL, item_id, number_of_items))
self.assertEqual(response.status_int, 200)
items = response.json['old']
self.assertIsNotNone(items)
return items
def retrieve_first_n_items_updated_after_given_timestamp(self, n, timestamp):
response = self.testapp.get("%s?n=%d×tamp=%f" % (ItemsJSON.URL, n, timestamp))
self.assertEqual(response.status_int, 200)
items = response.json['new']
self.assertIsNotNone(items)
return items
def retrieve_all_items(self):
response = self.testapp.get(ItemsJSON.URL)
self.assertEqual(response.status_int, 200)
items = response.json['old']
self.assertIsNotNone(items)
return items
def add_more_items(self, number_of_items=None):
if not number_of_items:
response = self.testapp.get(AddMore.URL)
else:
response = self.testapp.get('%s?n=%d' % (AddMore.URL, number_of_items))
self.assertEqual(response.status_int, 200)
self.assertEqual(response.body, "OK")
def update_items(self, items_ids):
request_url = '%s?' % Update.URL
param_separator = ''
for item_id in items_ids:
request_url += '%sid=%d' % (param_separator, item_id)
param_separator = '&'
response = self.testapp.get(request_url)
self.assertEqual(response.status_int, 200)
self.assertEqual(response.body, "OK")
def setUp(self):
QuestionRepository.create_table()
QuestionRepository.truncate()
self.testapp = webtest.TestApp(main.app)
def test_checking_that_app_is_ready(self):
response = self.testapp.get(Ready.URL)
self.assertEqual(response.status_int, 200)
self.assertEqual(response.body, "OK")
def test_each_question_has_unique_id(self):
QuestionRepository.populate(2)
items = self.retrieve_all_items()
self.assertEqual(len(items), 2)
self.assertNotEqual(items[0][u'id'], items[1][u'id'])
def test_each_question_has_timestamp_encoded_as_float(self):
QuestionRepository.populate(2)
items = self.retrieve_all_items()
self.assertEqual(len(items), 2)
self.assertEqual(len(items), 2)
self.assertGreater(items[0][u'created'],
items[1][u'created'])
def test_each_question_has_updated_field_that_is_equal_to_timestamp_field(self):
QuestionRepository.populate(1)
items = self.retrieve_all_items()
self.assertEqual(len(items), 1)
self.assertEqual(len(items), 1)
self.assertEqual(items[0][u'created'],
items[0][u'updated'])
def test_retrieving_items_using_json(self):
QuestionRepository.populate(5)
items = self.retrieve_all_items()
self.assertEqual(len(items), 5)
self.assertEqual(len(items), 5)
for record_index, record in enumerate(items):
self.assertEqual(record[u'content'], "%s%d" % (self.TEST_ITEM, 5 - record_index - 1),
"record_index:%d, record:%s" % (record_index, record))
self.assertEqual(record[u'header'], "%s%d" % (self.HEADER_FOR_ITEM, 5 - record_index - 1),
"record_index:%d, record:%s" % (record_index, record))
def test_retrieving_first_n_items_posted_before_item_with_given_id(self):
QuestionRepository.populate(15)
items = self.retrieve_first_n_items(5)
self.assertEqual(len(items), 5)
oldest_item_id = items[4][u'id']
self.assertEqual('11', oldest_item_id)
items = self.retrieve_first_n_items_before_item_with_id(int(oldest_item_id), 5)
self.assertEqual(len(items), 5)
self.assertEqual('10', items[0][u'id'])
self.assertEqual('6', items[4][u'id'])
def test_retrieving_first_n_items(self):
QuestionRepository.populate(20)
items = self.retrieve_first_n_items(10)
self.assertEqual(len(items), 10)
def test_retrieving_first_n_items_when_number_of_available_items_less_than_n(self):
QuestionRepository.populate(5)
items = self.retrieve_first_n_items(10)
self.assertEqual(len(items), 5)
def test_posting_new_item_using_json(self):
json_item = [{'header': 'new item header', 'content': 'new item'}]
response = self.testapp.post(NewItem.URL, json.dumps(json_item),
{'Content-Type': 'application/json'})
self.assertEqual(response.status_int, 200)
items = self.retrieve_all_items()
self.assertEqual(len(items), 1)
item = items[0]
self.assertEqual(item[u'content'], "new item")
self.assertEqual(item[u'header'], "new item header")
def test_retrieving_first_n_items_posted_after_item_with_given_id(self):
QuestionRepository.populate(10)
items = self.retrieve_first_n_items(5)
most_recent_item_id = items[0][u'id']
most_recent_item_updated = items[0][u'updated']
self.assertEqual('10', most_recent_item_id)
oldest_item_id = items[4][u'id']
self.assertEqual('6', oldest_item_id)
number_of_new_items = 5
QuestionRepository.populate(number_of_new_items)
new_items = self.retrieve_first_n_items_updated_after_given_timestamp(number_of_new_items,
most_recent_item_updated)
self.assertEqual(len(new_items), number_of_new_items)
self.assertEqual('11', new_items[0][u'id'])
self.assertEqual('15', new_items[4][u'id'])
def test_retrieving_updated_items(self):
QuestionRepository.populate(10)
items = self.retrieve_first_n_items(5)
most_recent_item_id = items[0][u'id']
most_recent_item_updated = items[0][u'updated']
self.assertEqual('10', most_recent_item_id)
oldest_item_id = items[4][u'id']
self.assertEqual('6', oldest_item_id)
number_of_new_items = 5
QuestionRepository.populate(number_of_new_items)
list_of_items_to_be_updated = [6, 8, 10]
QuestionRepository.update_items(list_of_items_to_be_updated)
new_items = self.retrieve_first_n_items_updated_after_given_timestamp(number_of_new_items,
most_recent_item_updated)
self.assertEqual(len(new_items), number_of_new_items)
self.assertEqual('11', new_items[0][u'id'])
self.assertEqual('15', new_items[4][u'id'])
new_items = self.retrieve_first_n_items_updated_after_given_timestamp(len(list_of_items_to_be_updated),
new_items[4][u'updated'])
self.assertEqual(len(new_items), len(list_of_items_to_be_updated))
self.assertIn(int(new_items[0][u'id']), list_of_items_to_be_updated)
self.assertIn(int(new_items[1][u'id']), list_of_items_to_be_updated)
self.assertIn(int(new_items[2][u'id']), list_of_items_to_be_updated)
def test_retrieving_updated_items_when_no_new_items_have_been_added(self):
QuestionRepository.populate(10)
items = self.retrieve_first_n_items(5)
most_recent_item_updated = items[0][u'updated']
most_recent_item_id = items[0][u'id']
self.assertEqual('10', most_recent_item_id)
oldest_item_id = items[4][u'id']
self.assertEqual('6', oldest_item_id)
list_of_items_to_be_updated = [6, 8, 10]
QuestionRepository.update_items(list_of_items_to_be_updated)
new_items = self.retrieve_first_n_items_updated_after_given_timestamp(5,
most_recent_item_updated)
self.assertEqual(len(new_items), len(list_of_items_to_be_updated))
self.assertIn(int(new_items[0][u'id']), list_of_items_to_be_updated)
self.assertIn(int(new_items[1][u'id']), list_of_items_to_be_updated)
self.assertIn(int(new_items[2][u'id']), list_of_items_to_be_updated)
def test_adding_an_answer_to_a_question(self):
QuestionRepository.populate(2)
items = self.retrieve_all_items()
self.assertEqual(len(items), 2)
item = items[0]
self.assertIsNone(item[u'answer'])
QuestionRepository.update_item(item[u'id'], item[u'header'], item[u'content'], "New Answer")
items = self.retrieve_all_items()
item = items[0]
self.assertIsNotNone(item[u'answer'])
self.assertEqual("New Answer", item[u'answer'])
self.assertGreater(item[u'updated'], item[u'created'])
def test_adding_more_items_to_the_database_using_add_more_get_request_with_default_number_of_items(self):
QuestionRepository.populate(10)
items = self.retrieve_all_items()
self.assertEqual(len(items), 10)
item = items[0]
self.assertEqual('10', item[u'id'])
self.add_more_items()
items = self.retrieve_all_items()
self.assertEqual(len(items), 20)
item = items[0]
self.assertEqual('20', item[u'id'])
def test_adding_more_items_to_the_database_using_add_more_get_request_with_explicit_number_of_items(self):
QuestionRepository.populate(10)
items = self.retrieve_all_items()
self.assertEqual(len(items), 10)
item = items[0]
self.assertEqual('10', item[u'id'])
self.add_more_items(50)
items = self.retrieve_all_items()
self.assertEqual(len(items), 60)
item = items[0]
self.assertEqual('60', item[u'id'])
def test_updating_selected_items_using_get_request(self):
QuestionRepository.populate(5)
updated = ItemsJSON.timestamp_to_float(QuestionRepository.all().get().updated)
self.update_items([5, 3, 1])
new_items = self.retrieve_first_n_items_updated_after_given_timestamp(5, updated)
self.assertEqual(3, len(new_items)) | 0.490968 | 0.331904 |
from __future__ import print_function
import os
import shutil
import sys
import time
from unittest import TestCase
import unittest
from sitcpy.cui import CuiServer, SessionThread, CommandClient
from sitcpy.rbcp_server import default_pseudo_arg_parser, RbcpServer,\
PseudoDevice
from sitcpy.templates.cui_project import daq
from sitcpy.templates.cui_project.daq import DaqCommandHandler
from sitcpy.templates.cui_project.pseudo import PseudoDataGenerator,\
PseudoRbcpCommandHandler
PROMPT = "daq$ "
class DaqTest(TestCase):
def setUp(self):
# pseudo
args = default_pseudo_arg_parser().parse_args([])
command_port = args.port
data_port = args.dataport
rbcp_server = RbcpServer()
data_generator = PseudoDataGenerator()
rbcp_command_handler = PseudoRbcpCommandHandler("pdev$ ")
rbcp_command_handler.bind(rbcp_server, data_generator)
self.pdev = PseudoDevice(rbcp_command_handler, data_generator,
rbcp_server, command_port, data_port)
self.pdev.start()
# daq
self.run_no_txt_path = os.path.join(
os.path.dirname(daq.__file__), "run_no.txt")
if os.path.isfile(self.run_no_txt_path):
os.remove(self.run_no_txt_path)
self.log_dir = "log"
shutil.rmtree(self.log_dir, ignore_errors=True)
self.handler = DaqCommandHandler(PROMPT)
self.server = CuiServer(SessionThread, self.handler, 5050)
self.server.start()
self.cli = CommandClient(PROMPT, "localhost", 5050)
def tearDown(self):
self.cli.close()
self.server.stop()
self.server.join()
self.pdev.stop()
if os.path.isfile(self.run_no_txt_path):
os.remove(self.run_no_txt_path)
shutil.rmtree(self.log_dir, ignore_errors=True)
def test_commands(self):
res = self.cli.send_command("reload").strip()
self.assertTrue(res.startswith("OK:"))
res = self.cli.send_command("reload _file_not_found_error_").strip()
self.assertTrue(res.startswith("NG:"))
# stat
res = self.cli.send_command("stat").strip()
self.assertTrue(len(res.splitlines()) > 5)
for val in res.splitlines():
if val:
self.assertEqual(len(val.split("=", 1)), 2)
# rawsave
res = self.cli.send_command("rawsave").strip()
self.assertEqual(res, "off")
res = self.cli.send_command("rawsave off").strip()
self.assertEqual(res, "OK:off")
res = self.cli.send_command("rawsave").strip()
self.assertEqual(res, "off")
res = self.cli.send_command("rawsave on").strip()
self.assertEqual(res, "OK:on")
res = self.cli.send_command("rawsave").strip()
self.assertEqual(res, "on")
# runno
res = self.cli.send_command("runno 1").strip()
self.assertEqual(res, "OK:1")
res = self.cli.send_command("runno").strip()
self.assertEqual(res, "1")
res = self.cli.send_command("runno 2").strip()
self.assertEqual(res, "OK:2")
# exit
res = self.cli.send_command("exit")
self.assertTrue(res is None)
def test_daq(self):
self.assertFalse(os.path.isdir(self.log_dir))
self.cli.send_command("rawsave on")
res = self.cli.send_command("stat").strip()
print("===== stat before daq")
print(res)
print("/=====")
res = self.cli.send_command("run 100").strip()
self.assertTrue(res.startswith("NG:"))
res = self.cli.send_command("run").strip()
print("=====")
print(res)
print("/=====")
time.sleep(1)
res = self.cli.send_command("stat").strip()
print("===== stat during daq")
print(res)
print("/=====")
res = self.cli.send_command("run").strip()
self.assertTrue(res.startswith("NG:"))
time.sleep(1)
res = self.cli.send_command("stop 100").strip()
self.assertTrue(res.startswith("NG:"))
res = self.cli.send_command("stop").strip()
print("=====")
print(res)
print("/=====")
res = self.cli.send_command("stat").strip()
print("===== stat after daq")
print(res)
print("/=====")
self.assertTrue(os.path.isdir(self.log_dir))
if __name__ == "__main__":
print("python version: {0}.{1}.{2}".format(sys.version_info[0],
sys.version_info[1],
sys.version_info[2]))
print("default encoding: {0}".format(sys.getdefaultencoding()))
print()
unittest.main() | tests/test_cui_project.py | from __future__ import print_function
import os
import shutil
import sys
import time
from unittest import TestCase
import unittest
from sitcpy.cui import CuiServer, SessionThread, CommandClient
from sitcpy.rbcp_server import default_pseudo_arg_parser, RbcpServer,\
PseudoDevice
from sitcpy.templates.cui_project import daq
from sitcpy.templates.cui_project.daq import DaqCommandHandler
from sitcpy.templates.cui_project.pseudo import PseudoDataGenerator,\
PseudoRbcpCommandHandler
PROMPT = "daq$ "
class DaqTest(TestCase):
def setUp(self):
# pseudo
args = default_pseudo_arg_parser().parse_args([])
command_port = args.port
data_port = args.dataport
rbcp_server = RbcpServer()
data_generator = PseudoDataGenerator()
rbcp_command_handler = PseudoRbcpCommandHandler("pdev$ ")
rbcp_command_handler.bind(rbcp_server, data_generator)
self.pdev = PseudoDevice(rbcp_command_handler, data_generator,
rbcp_server, command_port, data_port)
self.pdev.start()
# daq
self.run_no_txt_path = os.path.join(
os.path.dirname(daq.__file__), "run_no.txt")
if os.path.isfile(self.run_no_txt_path):
os.remove(self.run_no_txt_path)
self.log_dir = "log"
shutil.rmtree(self.log_dir, ignore_errors=True)
self.handler = DaqCommandHandler(PROMPT)
self.server = CuiServer(SessionThread, self.handler, 5050)
self.server.start()
self.cli = CommandClient(PROMPT, "localhost", 5050)
def tearDown(self):
self.cli.close()
self.server.stop()
self.server.join()
self.pdev.stop()
if os.path.isfile(self.run_no_txt_path):
os.remove(self.run_no_txt_path)
shutil.rmtree(self.log_dir, ignore_errors=True)
def test_commands(self):
res = self.cli.send_command("reload").strip()
self.assertTrue(res.startswith("OK:"))
res = self.cli.send_command("reload _file_not_found_error_").strip()
self.assertTrue(res.startswith("NG:"))
# stat
res = self.cli.send_command("stat").strip()
self.assertTrue(len(res.splitlines()) > 5)
for val in res.splitlines():
if val:
self.assertEqual(len(val.split("=", 1)), 2)
# rawsave
res = self.cli.send_command("rawsave").strip()
self.assertEqual(res, "off")
res = self.cli.send_command("rawsave off").strip()
self.assertEqual(res, "OK:off")
res = self.cli.send_command("rawsave").strip()
self.assertEqual(res, "off")
res = self.cli.send_command("rawsave on").strip()
self.assertEqual(res, "OK:on")
res = self.cli.send_command("rawsave").strip()
self.assertEqual(res, "on")
# runno
res = self.cli.send_command("runno 1").strip()
self.assertEqual(res, "OK:1")
res = self.cli.send_command("runno").strip()
self.assertEqual(res, "1")
res = self.cli.send_command("runno 2").strip()
self.assertEqual(res, "OK:2")
# exit
res = self.cli.send_command("exit")
self.assertTrue(res is None)
def test_daq(self):
self.assertFalse(os.path.isdir(self.log_dir))
self.cli.send_command("rawsave on")
res = self.cli.send_command("stat").strip()
print("===== stat before daq")
print(res)
print("/=====")
res = self.cli.send_command("run 100").strip()
self.assertTrue(res.startswith("NG:"))
res = self.cli.send_command("run").strip()
print("=====")
print(res)
print("/=====")
time.sleep(1)
res = self.cli.send_command("stat").strip()
print("===== stat during daq")
print(res)
print("/=====")
res = self.cli.send_command("run").strip()
self.assertTrue(res.startswith("NG:"))
time.sleep(1)
res = self.cli.send_command("stop 100").strip()
self.assertTrue(res.startswith("NG:"))
res = self.cli.send_command("stop").strip()
print("=====")
print(res)
print("/=====")
res = self.cli.send_command("stat").strip()
print("===== stat after daq")
print(res)
print("/=====")
self.assertTrue(os.path.isdir(self.log_dir))
if __name__ == "__main__":
print("python version: {0}.{1}.{2}".format(sys.version_info[0],
sys.version_info[1],
sys.version_info[2]))
print("default encoding: {0}".format(sys.getdefaultencoding()))
print()
unittest.main() | 0.301568 | 0.121842 |
import logging
import os
import time
from socket import gethostname
from flask import g, Flask, jsonify, request, send_from_directory, render_template
from werkzeug.middleware.proxy_fix import ProxyFix
from wbc.exceptions import WBCApiError, WBCHtmlError
from wbc.views.healthcheck import Healthcheck
from wbc.views.api import Document, Issue, Search, Suggest
from wbc.views.html import DocumentHTML, HomeHTML, SearchHTML
from .assets import register_assets
from .common import get_app_version
app = Flask(import_name=__name__)
# config
# @see http://stackoverflow.com/a/37331139/5446110
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 365 * 86400 # a year
# healthcheck
app.add_url_rule('/healthcheck', view_func=Healthcheck.as_view('healthcheck'))
# API
app.add_url_rule('/api/v1/documents/<int:document_id>', view_func=Document.as_view('documents'))
app.add_url_rule('/api/v1/documents/<int:document_id>.txt', view_func=Document.as_view('documents.txt'))
app.add_url_rule('/api/v1/issues/<int:issue_id>', view_func=Issue.as_view('issues'))
app.add_url_rule('/api/v1/search', view_func=Search.as_view('search'))
app.add_url_rule('/api/v1/suggest', view_func=Suggest.as_view('suggest'))
# HTML
app.add_url_rule('/', view_func=HomeHTML.as_view('home.html'))
app.add_url_rule('/document/<int:document_id>.html', view_func=DocumentHTML.as_view('documents-short.html'))
app.add_url_rule('/document/<int:document_id>/<string:name>.html', view_func=DocumentHTML.as_view('documents.html'))
app.add_url_rule('/search', view_func=SearchHTML.as_view('search.html'))
# favicon
root_path = app.root_path
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(root_path, 'static'),
filename='img/favicon.ico', mimetype='image/vnd.microsoft.icon')
# robots.txt and sitemaps
@app.route('/robots.txt')
def robots():
return send_from_directory(os.path.join(root_path, 'static'),
filename='robots.txt', mimetype='text/plain', cache_timeout=86400)
# @see http://flask.pocoo.org/snippets/57/
@app.route('/sitemap.xml', defaults={'sitemap_id': 'index'})
@app.route('/sitemap-<string:sitemap_id>.xml')
def sitemap(sitemap_id):
"""
:type sitemap_id str
:rtype: flask.wrappers.ResponseBase
"""
return send_from_directory(os.path.join(root_path, 'sitemap'),
filename='sitemap-{}.xml'.format(sitemap_id),
mimetype='text/xml', cache_timeout=86400, add_etags=False)
# errors handling
@app.errorhandler(WBCApiError)
def handle_bad_api_request(e):
"""
:type e WBCApiError
"""
return jsonify(
error=True,
details=e.get_message()
), e.get_response_code()
# errors handling
@app.errorhandler(WBCHtmlError)
def handle_bad_html_request(e):
"""
:type e WBCHtmlError
"""
return render_template('error.html', message=e.get_message(), code=e.get_response_code()), e.get_response_code()
@app.errorhandler(404)
def handle_not_found(e):
# API requests - return error messages as JSON
if request.path.startswith('/api'):
return handle_bad_api_request(WBCApiError('API end-point not found', 404))
# emit HTML
else:
return '<strong>HTTP 404</strong> not found', 404
# measure response time
@app.before_request
def app_before_request():
g.start = time.time()
hostname = gethostname() # cache to avoid uname syscall on each request
@app.after_request
def app_after_request(response):
"""
:type response flask.wrappers.ResponseBase
:rtype: flask.wrappers.ResponseBase
"""
response.headers.set('X-Backend-Response-Time', '{:.4f}'.format(time.time() - g.start))
response.headers.set('X-Served-By', hostname)
return response
# setup logging
is_debug = os.environ.get('DEBUG')
logging.basicConfig(
level=logging.DEBUG if is_debug else logging.INFO,
format='%(asctime)s %(name)-25s %(levelname)-8s %(message)s',
datefmt="%Y-%m-%d %H:%M:%S"
)
# emit git hash and register a helper function for templates
app.logger.info('{} is now running using code {}'.format(app.name, get_app_version()))
app.jinja_env.globals.update(get_app_version=get_app_version)
# register assets
register_assets(app)
# ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app) | app/wbc/__init__.py | import logging
import os
import time
from socket import gethostname
from flask import g, Flask, jsonify, request, send_from_directory, render_template
from werkzeug.middleware.proxy_fix import ProxyFix
from wbc.exceptions import WBCApiError, WBCHtmlError
from wbc.views.healthcheck import Healthcheck
from wbc.views.api import Document, Issue, Search, Suggest
from wbc.views.html import DocumentHTML, HomeHTML, SearchHTML
from .assets import register_assets
from .common import get_app_version
app = Flask(import_name=__name__)
# config
# @see http://stackoverflow.com/a/37331139/5446110
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 365 * 86400 # a year
# healthcheck
app.add_url_rule('/healthcheck', view_func=Healthcheck.as_view('healthcheck'))
# API
app.add_url_rule('/api/v1/documents/<int:document_id>', view_func=Document.as_view('documents'))
app.add_url_rule('/api/v1/documents/<int:document_id>.txt', view_func=Document.as_view('documents.txt'))
app.add_url_rule('/api/v1/issues/<int:issue_id>', view_func=Issue.as_view('issues'))
app.add_url_rule('/api/v1/search', view_func=Search.as_view('search'))
app.add_url_rule('/api/v1/suggest', view_func=Suggest.as_view('suggest'))
# HTML
app.add_url_rule('/', view_func=HomeHTML.as_view('home.html'))
app.add_url_rule('/document/<int:document_id>.html', view_func=DocumentHTML.as_view('documents-short.html'))
app.add_url_rule('/document/<int:document_id>/<string:name>.html', view_func=DocumentHTML.as_view('documents.html'))
app.add_url_rule('/search', view_func=SearchHTML.as_view('search.html'))
# favicon
root_path = app.root_path
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(root_path, 'static'),
filename='img/favicon.ico', mimetype='image/vnd.microsoft.icon')
# robots.txt and sitemaps
@app.route('/robots.txt')
def robots():
return send_from_directory(os.path.join(root_path, 'static'),
filename='robots.txt', mimetype='text/plain', cache_timeout=86400)
# @see http://flask.pocoo.org/snippets/57/
@app.route('/sitemap.xml', defaults={'sitemap_id': 'index'})
@app.route('/sitemap-<string:sitemap_id>.xml')
def sitemap(sitemap_id):
"""
:type sitemap_id str
:rtype: flask.wrappers.ResponseBase
"""
return send_from_directory(os.path.join(root_path, 'sitemap'),
filename='sitemap-{}.xml'.format(sitemap_id),
mimetype='text/xml', cache_timeout=86400, add_etags=False)
# errors handling
@app.errorhandler(WBCApiError)
def handle_bad_api_request(e):
"""
:type e WBCApiError
"""
return jsonify(
error=True,
details=e.get_message()
), e.get_response_code()
# errors handling
@app.errorhandler(WBCHtmlError)
def handle_bad_html_request(e):
"""
:type e WBCHtmlError
"""
return render_template('error.html', message=e.get_message(), code=e.get_response_code()), e.get_response_code()
@app.errorhandler(404)
def handle_not_found(e):
# API requests - return error messages as JSON
if request.path.startswith('/api'):
return handle_bad_api_request(WBCApiError('API end-point not found', 404))
# emit HTML
else:
return '<strong>HTTP 404</strong> not found', 404
# measure response time
@app.before_request
def app_before_request():
g.start = time.time()
hostname = gethostname() # cache to avoid uname syscall on each request
@app.after_request
def app_after_request(response):
"""
:type response flask.wrappers.ResponseBase
:rtype: flask.wrappers.ResponseBase
"""
response.headers.set('X-Backend-Response-Time', '{:.4f}'.format(time.time() - g.start))
response.headers.set('X-Served-By', hostname)
return response
# setup logging
is_debug = os.environ.get('DEBUG')
logging.basicConfig(
level=logging.DEBUG if is_debug else logging.INFO,
format='%(asctime)s %(name)-25s %(levelname)-8s %(message)s',
datefmt="%Y-%m-%d %H:%M:%S"
)
# emit git hash and register a helper function for templates
app.logger.info('{} is now running using code {}'.format(app.name, get_app_version()))
app.jinja_env.globals.update(get_app_version=get_app_version)
# register assets
register_assets(app)
# ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app) | 0.350088 | 0.047448 |
def sisalampotila():
sisanyt = tempread.read_temp_in()
return sisanyt
def main(day, month, year, hour):
# Setup-osa
import lukeminen
#Muodostetaan str-tyyppinen päiväys muodossa "dd.mm.yy" Nordpoolin hintatietojen lukemiseksi
if day < 10:
day = str(day)
day = str("0"+day)
if month < 10:
month = str(month)
month = str("0"+month)
pvm = str("{}.{}.{}".format(day, month, year))
print(pvm)
tiedot = lukeminen.luetiedot(pvm)
minimi = lukeminen.minimi(tiedot)
min_delta = lukeminen.min_delta(tiedot, minimi)
# Etsitään ohjelma-tilan alut ja loput
print("emptying csv-file\n")
open("tasklists/tasklist-prog.csv", "w").close()
print("entering loop")
h=0
while(True):
print("h=", h)
for rivi in min_delta[h:]:
print(rivi)
h+=1
print("h=", h)
if rivi > 0.19:
f = open("tasklists/tasklist-prog.csv", "w") # Tähän alkuhetken tallennus
f.write("start,{:d}-{:d}-{:d},{:d}:{:d}:{:d}\n".format(int(year), int(month), int(day), h-1, 0, 0))
for rivi in min_delta[h-1:]:
print(rivi)
h+=1
if rivi < 0.19:
f = open("tasklists/tasklist-prog.csv", "a") # Tähän päättymishetken tallennus
f.write("end,{:d}-{:d}-{:d},{:d}:{:d}:{:d}\n".format(int(year), int(month), int(day), h-1, 0, 0))
print("stop") # Tähän päättymishetken tallennus
break
break
# ongelmatilanne, jossa päivän viimeinen luku ei ole alle Pd tai Pdd, jolloin looppi jää avoimeksi.
if h==24:
break
#Ratkaistaan prog-tilan aikaväli, jonka jälkeen lasketaan mihin lämpötilaan rakennus pitää lämmittää ennen välin alkua.
#Välin aikana lasketaan milloin pitää lämmittää lisää jotta lämpötila pysyy Tmin yläpuolella. | tulkinta.py | def sisalampotila():
sisanyt = tempread.read_temp_in()
return sisanyt
def main(day, month, year, hour):
# Setup-osa
import lukeminen
#Muodostetaan str-tyyppinen päiväys muodossa "dd.mm.yy" Nordpoolin hintatietojen lukemiseksi
if day < 10:
day = str(day)
day = str("0"+day)
if month < 10:
month = str(month)
month = str("0"+month)
pvm = str("{}.{}.{}".format(day, month, year))
print(pvm)
tiedot = lukeminen.luetiedot(pvm)
minimi = lukeminen.minimi(tiedot)
min_delta = lukeminen.min_delta(tiedot, minimi)
# Etsitään ohjelma-tilan alut ja loput
print("emptying csv-file\n")
open("tasklists/tasklist-prog.csv", "w").close()
print("entering loop")
h=0
while(True):
print("h=", h)
for rivi in min_delta[h:]:
print(rivi)
h+=1
print("h=", h)
if rivi > 0.19:
f = open("tasklists/tasklist-prog.csv", "w") # Tähän alkuhetken tallennus
f.write("start,{:d}-{:d}-{:d},{:d}:{:d}:{:d}\n".format(int(year), int(month), int(day), h-1, 0, 0))
for rivi in min_delta[h-1:]:
print(rivi)
h+=1
if rivi < 0.19:
f = open("tasklists/tasklist-prog.csv", "a") # Tähän päättymishetken tallennus
f.write("end,{:d}-{:d}-{:d},{:d}:{:d}:{:d}\n".format(int(year), int(month), int(day), h-1, 0, 0))
print("stop") # Tähän päättymishetken tallennus
break
break
# ongelmatilanne, jossa päivän viimeinen luku ei ole alle Pd tai Pdd, jolloin looppi jää avoimeksi.
if h==24:
break
#Ratkaistaan prog-tilan aikaväli, jonka jälkeen lasketaan mihin lämpötilaan rakennus pitää lämmittää ennen välin alkua.
#Välin aikana lasketaan milloin pitää lämmittää lisää jotta lämpötila pysyy Tmin yläpuolella. | 0.128881 | 0.26011 |
import numpy
def fill_in_zeros(fwd_rvs_align_list, ref_len, nt):
"""
Generate alignment counts for every nucleotide in the reference
:param fwd_rvs_align_list: list of sorted forwards and reverse alignments
:param ref_len: number of nucleotides in the reference sequence (int)
:param nt: length of the aligned reads (int)
:return: reference_x_axis ([0,0,...] (list(int)) - length of refseq seq,
fwd_alignment_y_axis [2,4,5.2,6,....] (list(float)) - sense strand alignment count (positive),
fwd_rvs_align_list [-3,-4,-5.6,...] (list(float)) - antisense strand alignment count (negative)
"""
sorted_fwd_alignment = fwd_rvs_align_list[0]
sorted_rvs_alignment = fwd_rvs_align_list[1]
fwd_alignment_y_axis = [0] * ref_len
revs_alignment_y_axis = [0] * ref_len
reference_x_axis = list(range(0, ref_len))
#Note alignment position for graphing is in the centre of the read (and not the 5' end)
for i in sorted_fwd_alignment:
fwd_alignment_y_axis[i[0] + int(nt / 2)] = i[1]
for i in sorted_rvs_alignment:
revs_alignment_y_axis[i[0] - int(nt / 2)] = i[1]
# #Coverage per nucleotide instead - maybe use?
# for i in sorted_fwd_alignment:
# for j in range(nt):
# fwd_alignment_y_axis[i[0]+j]+=i[1]
# for i in sorted_rvs_alignment:
# for j in range(nt):
# revs_alignment_y_axis[i[0]-j]+=i[1]
return reference_x_axis, fwd_alignment_y_axis, revs_alignment_y_axis
def calc_alignments_by_strand(fwd_rvs_align_list):
"""
:param fwd_rvs_align_list: list of sorted forwards and reverse alignments
:return: Total RPMR aligned for fwd and rvs strands (float)
"""
sorted_fwd_alignment = fwd_rvs_align_list[0]
sorted_rvs_alignment = fwd_rvs_align_list[1]
fwd_align_count = 0
rvs_align_count = 0
for i in sorted_fwd_alignment:
fwd_align_count += i[1]
for i in sorted_rvs_alignment:
rvs_align_count -= i[1]
return fwd_align_count, rvs_align_count
def smooth(x, window_len, window='hamming'):
"""
Smoothing function from scipy cookbook
:param x:
:param window_len:
:param window:
:return:
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 6:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = numpy.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
if window == 'flat': # moving average
w = numpy.ones(window_len, 'd')
else:
w = eval('numpy.' + window + '(window_len)')
y = numpy.convolve(w / w.sum(), s, mode='valid')
return y[int(window_len / 2 - 1):-int(window_len / 2)] | scram_modules/post_process.py | import numpy
def fill_in_zeros(fwd_rvs_align_list, ref_len, nt):
"""
Generate alignment counts for every nucleotide in the reference
:param fwd_rvs_align_list: list of sorted forwards and reverse alignments
:param ref_len: number of nucleotides in the reference sequence (int)
:param nt: length of the aligned reads (int)
:return: reference_x_axis ([0,0,...] (list(int)) - length of refseq seq,
fwd_alignment_y_axis [2,4,5.2,6,....] (list(float)) - sense strand alignment count (positive),
fwd_rvs_align_list [-3,-4,-5.6,...] (list(float)) - antisense strand alignment count (negative)
"""
sorted_fwd_alignment = fwd_rvs_align_list[0]
sorted_rvs_alignment = fwd_rvs_align_list[1]
fwd_alignment_y_axis = [0] * ref_len
revs_alignment_y_axis = [0] * ref_len
reference_x_axis = list(range(0, ref_len))
#Note alignment position for graphing is in the centre of the read (and not the 5' end)
for i in sorted_fwd_alignment:
fwd_alignment_y_axis[i[0] + int(nt / 2)] = i[1]
for i in sorted_rvs_alignment:
revs_alignment_y_axis[i[0] - int(nt / 2)] = i[1]
# #Coverage per nucleotide instead - maybe use?
# for i in sorted_fwd_alignment:
# for j in range(nt):
# fwd_alignment_y_axis[i[0]+j]+=i[1]
# for i in sorted_rvs_alignment:
# for j in range(nt):
# revs_alignment_y_axis[i[0]-j]+=i[1]
return reference_x_axis, fwd_alignment_y_axis, revs_alignment_y_axis
def calc_alignments_by_strand(fwd_rvs_align_list):
"""
:param fwd_rvs_align_list: list of sorted forwards and reverse alignments
:return: Total RPMR aligned for fwd and rvs strands (float)
"""
sorted_fwd_alignment = fwd_rvs_align_list[0]
sorted_rvs_alignment = fwd_rvs_align_list[1]
fwd_align_count = 0
rvs_align_count = 0
for i in sorted_fwd_alignment:
fwd_align_count += i[1]
for i in sorted_rvs_alignment:
rvs_align_count -= i[1]
return fwd_align_count, rvs_align_count
def smooth(x, window_len, window='hamming'):
"""
Smoothing function from scipy cookbook
:param x:
:param window_len:
:param window:
:return:
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 6:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = numpy.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
if window == 'flat': # moving average
w = numpy.ones(window_len, 'd')
else:
w = eval('numpy.' + window + '(window_len)')
y = numpy.convolve(w / w.sum(), s, mode='valid')
return y[int(window_len / 2 - 1):-int(window_len / 2)] | 0.646125 | 0.468912 |
import collections
import boto3
import botocore
import logging
import os
import uuid
logger = logging.getLogger(__name__)
def downloadFile(srcFile, destFile):
''' Download file from S3 '''
s3 = boto3.resource('s3')
bucket, key = find_bucket_key(srcFile)
try:
s3.meta.client.download_file(bucket, key, destFile)
except botocore.exceptions.ClientError as e:
logger.warn(e)
def exists(s3path):
''' Return true is s3path is an object, else false '''
s3 = boto3.resource('s3')
bucket, key = find_bucket_key(s3path)
try:
s3.Object(bucket, key).load()
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
# The object does not exist.
return False
else:
# Something else has gone wrong.
raise
# The object does exist.
return True
def uploadFile(srcFile, destFile):
''' Upload a file to S3 '''
s3 = boto3.resource('s3')
bucket, key = find_bucket_key(destFile)
s3.meta.client.upload_file(srcFile, bucket, key,
ExtraArgs={'ServerSideEncryption': 'AES256'})
def find_bucket_key(s3path):
"""
This is a helper function that given an s3 path such that the path is of
the form: bucket/key
It will return the bucket and the key represented by the s3 path, eg
if s3path == s3://bmsrd-ngs-data/P-234
"""
if s3path.startswith('s3://'):
s3path = s3path[5:]
s3components = s3path.split('/')
bucket = s3components[0]
s3key = ""
if len(s3components) > 1:
s3key = '/'.join(s3components[1:])
return bucket, s3key
def listFiles(s3_cache_dir, suffix=None):
''' Return a list of files in s3_cache_dir '''
files = []
bucketName, folder = find_bucket_key(s3_cache_dir)
s3bucket = 's3://%s/' % bucketName
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucketName)
for obj in bucket.objects.filter(Prefix=folder + '/'):
if suffix:
if obj.key.endswith(suffix):
files.append(s3bucket + obj.key)
else:
files.append(s3bucket + obj.key)
return files
def readSamples(sampleSheetFile):
'''
readSamples reads in a sampleSheetFile consisting of three columns:
name, bamfile, reference
:param sampleSheetFile: tab-delimited file
:return: list of {name, bam, reference} dictionaries
'''
if os.path.isfile(sampleSheetFile) is False:
logger.error("%s does not exist", sampleSheetFile)
return False
logger.info("Reading %s", sampleSheetFile)
sampleNames = []
samples = []
with open(sampleSheetFile, 'r') as f:
for line in f:
line = line.rstrip('\r\n')
if len(line) == 0 or line.startswith('#'):
continue
fields = line.split('\t')
if len(fields) != 3:
logger.error("Expected 3 columns in samplesheet, but found %s", len(fields))
return False
sampleNames.append(fields[0])
sample = {"name": fields[0],
"bam": fields[1],
"reference": fields[2]}
samples.append(sample)
duplicates = [item for item, count in collections.Counter(sampleNames).items() if count > 1]
if duplicates:
logger.error("Duplicate sampleids found in %s", sampleSheetFile)
for dup in duplicates:
logger.error(dup)
return False
logger.info("Read %d samples.", len(samples))
return samples
def generate_working_dir(working_dir_base):
"""
Creates a unique working directory to combat job multitenancy
:param working_dir_base: base working directory
:return: a unique subfolder in working_dir_base with a uuid
"""
working_dir = os.path.join(working_dir_base, str(uuid.uuid4()))
try:
os.mkdir(working_dir)
except Exception as e:
return working_dir_base
return working_dir
def delete_working_dir(working_dir):
"""
Deletes working directory
:param working_dir: working directory
"""
try:
shutil.rmtree(working_dir)
except Exception as e:
print ('Can\'t delete %s' % working_dir) | common.py | import collections
import boto3
import botocore
import logging
import os
import uuid
logger = logging.getLogger(__name__)
def downloadFile(srcFile, destFile):
''' Download file from S3 '''
s3 = boto3.resource('s3')
bucket, key = find_bucket_key(srcFile)
try:
s3.meta.client.download_file(bucket, key, destFile)
except botocore.exceptions.ClientError as e:
logger.warn(e)
def exists(s3path):
''' Return true is s3path is an object, else false '''
s3 = boto3.resource('s3')
bucket, key = find_bucket_key(s3path)
try:
s3.Object(bucket, key).load()
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
# The object does not exist.
return False
else:
# Something else has gone wrong.
raise
# The object does exist.
return True
def uploadFile(srcFile, destFile):
''' Upload a file to S3 '''
s3 = boto3.resource('s3')
bucket, key = find_bucket_key(destFile)
s3.meta.client.upload_file(srcFile, bucket, key,
ExtraArgs={'ServerSideEncryption': 'AES256'})
def find_bucket_key(s3path):
"""
This is a helper function that given an s3 path such that the path is of
the form: bucket/key
It will return the bucket and the key represented by the s3 path, eg
if s3path == s3://bmsrd-ngs-data/P-234
"""
if s3path.startswith('s3://'):
s3path = s3path[5:]
s3components = s3path.split('/')
bucket = s3components[0]
s3key = ""
if len(s3components) > 1:
s3key = '/'.join(s3components[1:])
return bucket, s3key
def listFiles(s3_cache_dir, suffix=None):
''' Return a list of files in s3_cache_dir '''
files = []
bucketName, folder = find_bucket_key(s3_cache_dir)
s3bucket = 's3://%s/' % bucketName
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucketName)
for obj in bucket.objects.filter(Prefix=folder + '/'):
if suffix:
if obj.key.endswith(suffix):
files.append(s3bucket + obj.key)
else:
files.append(s3bucket + obj.key)
return files
def readSamples(sampleSheetFile):
'''
readSamples reads in a sampleSheetFile consisting of three columns:
name, bamfile, reference
:param sampleSheetFile: tab-delimited file
:return: list of {name, bam, reference} dictionaries
'''
if os.path.isfile(sampleSheetFile) is False:
logger.error("%s does not exist", sampleSheetFile)
return False
logger.info("Reading %s", sampleSheetFile)
sampleNames = []
samples = []
with open(sampleSheetFile, 'r') as f:
for line in f:
line = line.rstrip('\r\n')
if len(line) == 0 or line.startswith('#'):
continue
fields = line.split('\t')
if len(fields) != 3:
logger.error("Expected 3 columns in samplesheet, but found %s", len(fields))
return False
sampleNames.append(fields[0])
sample = {"name": fields[0],
"bam": fields[1],
"reference": fields[2]}
samples.append(sample)
duplicates = [item for item, count in collections.Counter(sampleNames).items() if count > 1]
if duplicates:
logger.error("Duplicate sampleids found in %s", sampleSheetFile)
for dup in duplicates:
logger.error(dup)
return False
logger.info("Read %d samples.", len(samples))
return samples
def generate_working_dir(working_dir_base):
"""
Creates a unique working directory to combat job multitenancy
:param working_dir_base: base working directory
:return: a unique subfolder in working_dir_base with a uuid
"""
working_dir = os.path.join(working_dir_base, str(uuid.uuid4()))
try:
os.mkdir(working_dir)
except Exception as e:
return working_dir_base
return working_dir
def delete_working_dir(working_dir):
"""
Deletes working directory
:param working_dir: working directory
"""
try:
shutil.rmtree(working_dir)
except Exception as e:
print ('Can\'t delete %s' % working_dir) | 0.341363 | 0.109658 |
import tensorflow as tf
import numpy as np
class PatrollerValue(object):
def __init__(self, args, scope):
with tf.variable_scope(scope):
self.args = args
self.row_num = self.args.row_num
self.column_num = self.args.column_num
self.in_channel = self.args.pa_state_size
self.id_action = {
0: 'still',
1: 'up',
2: 'down',
3: 'left',
4: 'right'
}
self.variables = []
# Input placeholders
self.input_state = tf.placeholder(tf.float32, [None, self.row_num, self.column_num, self.in_channel])
self.state_values_target = tf.placeholder(tf.float32)
# 4-D, [batch_size, row, col, state_channel] [-1, 7, 7, 10]
self.learning_rate = tf.placeholder(tf.float32)
# Build Graph
with tf.variable_scope('conv-maxpool-0'):
if self.args.row_num == 7:
filter_shape = [4, 4, self.in_channel, 16]
elif self.args.row_num == 5:
filter_shape = [3, 3, self.in_channel, 16]
elif self.args.row_num == 3:
filter_shape = [2, 2, self.in_channel, 16]
self.W0 = tf.get_variable(name='weights',
initializer=tf.truncated_normal(filter_shape, stddev=0.001))
self.b0 = tf.get_variable(name='bias', initializer=tf.zeros([16]))
self.conv0 = tf.nn.conv2d(self.input_state, self.W0, strides=[1, 1, 1, 1], padding="SAME")
self.conv0 = tf.nn.relu(tf.nn.bias_add(self.conv0, self.b0), name="relu")
# Batch x 7 x 7 x 16
self.variables.append(self.W0)
self.variables.append(self.b0)
with tf.variable_scope('conv-maxpool-1'):
filter_shape = [2, 2, 16, 32]
self.W1 = tf.get_variable(name='weights',
initializer=tf.truncated_normal(filter_shape, stddev=0.001))
self.b1 = tf.get_variable(name='bias', initializer=tf.zeros([32]))
self.conv1 = tf.nn.conv2d(self.conv0, self.W1, strides=[1, 2, 2, 1], padding="SAME")
self.conv1 = tf.nn.relu(tf.nn.bias_add(self.conv1, self.b1), name="relu")
# Batch x 4 x 4 x 32
self.variables.append(self.W1)
self.variables.append(self.b1)
with tf.variable_scope('fc0'):
if self.args.row_num == 7:
self.Wf0 = tf.get_variable(name='weights',
initializer=tf.truncated_normal([4 * 4 * 32, 64], stddev=0.001))
self.fc0 = tf.reshape(self.conv1, [-1, 4 * 4 * 32])
elif self.args.row_num == 5:
self.Wf0 = tf.get_variable(name='weights',
initializer=tf.truncated_normal([3 * 3 * 32, 64], stddev=0.001))
self.fc0 = tf.reshape(self.conv1, [-1, 3 * 3 * 32])
elif self.args.row_num == 3:
self.Wf0 = tf.get_variable(name='weights',
initializer=tf.truncated_normal([2 * 2 * 32, 64], stddev=0.001))
self.fc0 = tf.reshape(self.conv1, [-1, 2 * 2 * 32])
# self.Wf0 = tf.get_variable(name='weights',
# initializer=tf.truncated_normal([4 * 4 * 32, 64], stddev=0.001))
self.bf0 = tf.get_variable(name='bias', initializer=tf.zeros([64]))
# self.fc0 = tf.reshape(self.conv1, [-1, 4 * 4 * 32])
self.fc0 = tf.add(tf.matmul(self.fc0, self.Wf0), self.bf0)
self.fc0 = tf.nn.relu(self.fc0)
self.variables.append(self.Wf0)
self.variables.append(self.bf0)
with tf.variable_scope('out'):
self.Wo = tf.get_variable(name='weights',
initializer=tf.truncated_normal([64, 1]))
self.bo = tf.get_variable(name='bias', initializer=tf.zeros([1]))
self.state_values = tf.reshape(tf.add(tf.matmul(self.fc0, self.Wo), self.bo), [-1]) # batch_size
self.variables.append(self.Wo)
self.variables.append(self.bo)
# Train operation
self.loss = tf.reduce_mean(tf.square(self.state_values - self.state_values_target))
self.train_op = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate, momentum=0.95,
epsilon=0.01).minimize(self.loss)
def get_state_value(self, sess, states):
state_values = sess.run(self.state_values, {self.input_state: states})
return state_values
def save(self, sess, filename):
saver = tf.train.Saver(self.variables)
saver.save(sess, filename)
def load(self, sess, filename):
saver = tf.train.Saver(self.variables)
saver.restore(sess, filename) | AC_patroller/patroller_value_network.py | import tensorflow as tf
import numpy as np
class PatrollerValue(object):
def __init__(self, args, scope):
with tf.variable_scope(scope):
self.args = args
self.row_num = self.args.row_num
self.column_num = self.args.column_num
self.in_channel = self.args.pa_state_size
self.id_action = {
0: 'still',
1: 'up',
2: 'down',
3: 'left',
4: 'right'
}
self.variables = []
# Input placeholders
self.input_state = tf.placeholder(tf.float32, [None, self.row_num, self.column_num, self.in_channel])
self.state_values_target = tf.placeholder(tf.float32)
# 4-D, [batch_size, row, col, state_channel] [-1, 7, 7, 10]
self.learning_rate = tf.placeholder(tf.float32)
# Build Graph
with tf.variable_scope('conv-maxpool-0'):
if self.args.row_num == 7:
filter_shape = [4, 4, self.in_channel, 16]
elif self.args.row_num == 5:
filter_shape = [3, 3, self.in_channel, 16]
elif self.args.row_num == 3:
filter_shape = [2, 2, self.in_channel, 16]
self.W0 = tf.get_variable(name='weights',
initializer=tf.truncated_normal(filter_shape, stddev=0.001))
self.b0 = tf.get_variable(name='bias', initializer=tf.zeros([16]))
self.conv0 = tf.nn.conv2d(self.input_state, self.W0, strides=[1, 1, 1, 1], padding="SAME")
self.conv0 = tf.nn.relu(tf.nn.bias_add(self.conv0, self.b0), name="relu")
# Batch x 7 x 7 x 16
self.variables.append(self.W0)
self.variables.append(self.b0)
with tf.variable_scope('conv-maxpool-1'):
filter_shape = [2, 2, 16, 32]
self.W1 = tf.get_variable(name='weights',
initializer=tf.truncated_normal(filter_shape, stddev=0.001))
self.b1 = tf.get_variable(name='bias', initializer=tf.zeros([32]))
self.conv1 = tf.nn.conv2d(self.conv0, self.W1, strides=[1, 2, 2, 1], padding="SAME")
self.conv1 = tf.nn.relu(tf.nn.bias_add(self.conv1, self.b1), name="relu")
# Batch x 4 x 4 x 32
self.variables.append(self.W1)
self.variables.append(self.b1)
with tf.variable_scope('fc0'):
if self.args.row_num == 7:
self.Wf0 = tf.get_variable(name='weights',
initializer=tf.truncated_normal([4 * 4 * 32, 64], stddev=0.001))
self.fc0 = tf.reshape(self.conv1, [-1, 4 * 4 * 32])
elif self.args.row_num == 5:
self.Wf0 = tf.get_variable(name='weights',
initializer=tf.truncated_normal([3 * 3 * 32, 64], stddev=0.001))
self.fc0 = tf.reshape(self.conv1, [-1, 3 * 3 * 32])
elif self.args.row_num == 3:
self.Wf0 = tf.get_variable(name='weights',
initializer=tf.truncated_normal([2 * 2 * 32, 64], stddev=0.001))
self.fc0 = tf.reshape(self.conv1, [-1, 2 * 2 * 32])
# self.Wf0 = tf.get_variable(name='weights',
# initializer=tf.truncated_normal([4 * 4 * 32, 64], stddev=0.001))
self.bf0 = tf.get_variable(name='bias', initializer=tf.zeros([64]))
# self.fc0 = tf.reshape(self.conv1, [-1, 4 * 4 * 32])
self.fc0 = tf.add(tf.matmul(self.fc0, self.Wf0), self.bf0)
self.fc0 = tf.nn.relu(self.fc0)
self.variables.append(self.Wf0)
self.variables.append(self.bf0)
with tf.variable_scope('out'):
self.Wo = tf.get_variable(name='weights',
initializer=tf.truncated_normal([64, 1]))
self.bo = tf.get_variable(name='bias', initializer=tf.zeros([1]))
self.state_values = tf.reshape(tf.add(tf.matmul(self.fc0, self.Wo), self.bo), [-1]) # batch_size
self.variables.append(self.Wo)
self.variables.append(self.bo)
# Train operation
self.loss = tf.reduce_mean(tf.square(self.state_values - self.state_values_target))
self.train_op = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate, momentum=0.95,
epsilon=0.01).minimize(self.loss)
def get_state_value(self, sess, states):
state_values = sess.run(self.state_values, {self.input_state: states})
return state_values
def save(self, sess, filename):
saver = tf.train.Saver(self.variables)
saver.save(sess, filename)
def load(self, sess, filename):
saver = tf.train.Saver(self.variables)
saver.restore(sess, filename) | 0.819026 | 0.31258 |
import json
import math
import os
from bnlcrl import visualize as vis
from bnlcrl.utils import convert_types, defaults_file, read_json
parms = defaults_file(suffix='delta')
DAT_DIR = parms['dat_dir']
CONFIG_DIR = parms['config_dir']
DEFAULTS_FILE = parms['defaults_file']
class DeltaFinder:
def __init__(self, **kwargs):
# Check importable libs:
self._check_imports()
# Get input variables:
d = read_json(DEFAULTS_FILE)
self.server_info = d['server_info']
self.parameters = convert_types(d['parameters'])
self.default_e_min = self.parameters['e_min']['type'](self.parameters['e_min']['default'])
self.default_e_max = self.parameters['e_max']['type'](self.parameters['e_max']['default'])
for key, default_val in self.parameters.items():
if key in kwargs.keys():
setattr(self, key, self.parameters[key]['type'](kwargs[key]))
elif not hasattr(self, key) or getattr(self, key) is None:
setattr(self, key, default_val['default'])
self.characteristic_value = None
self.analytical_delta = None
self.closest_energy = None
self.content = None
self.raw_content = None
self.method = None # can be 'file', 'server', 'calculation'
self.output = None
self.elements = self.formula.split(',')
self.element = self.elements[-1]
if self.outfile:
self.save_to_file()
return
if not self.data_file:
self.method = 'server'
self._request_from_server()
else:
self.method = 'file'
self.data_file = os.path.join(DAT_DIR, self.data_file)
if self.calc_delta:
if self.available_libs['periodictable']:
self.method = 'calculation'
self.calculate_delta()
self.characteristic_value = self.analytical_delta
self.closest_energy = self.energy
else:
raise ValueError('"periodictable" library is not available. Install it if you want to use it.')
else:
self._find_characteristic_value()
if self.verbose:
self.print_info()
if self.save_output:
return_dict = {}
for k in d['cli_functions']['find_delta']['returns']:
return_dict[k] = getattr(self, k)
file_name = '{}.json'.format(_output_file_name(self.elements, self.characteristic))
with open(file_name, 'w') as f:
json.dump(return_dict, f)
def calculate_delta(self):
rho = getattr(self.periodictable, self.formula).density
z = getattr(self.periodictable, self.formula).number
mass = getattr(self.periodictable, self.formula).mass
z_over_a = z / mass
wl = 2 * math.pi * 1973 / self.energy # lambda= (2pi (hc))/E
self.analytical_delta = 2.7e-6 * wl ** 2 * rho * z_over_a
def print_info(self):
msg = 'Found {}={} for the closest energy={} eV from {}.'
print(msg.format(self.characteristic, self.characteristic_value, self.closest_energy, self.method))
def save_to_file(self):
self.e_min = self.default_e_min
self.e_max = self.e_min
counter = 0
try:
os.remove(self.outfile)
except:
pass
while self.e_max < self.default_e_max:
self.e_max += self.n_points * self.e_step
if self.e_max > self.default_e_max:
self.e_max = self.default_e_max
self._request_from_server()
if counter > 0:
# Get rid of headers (2 first rows) and the first data row to avoid data overlap:
content = self.content.split('\n')
self.content = '\n'.join(content[3:])
with open(self.outfile, 'a') as f:
f.write(self.content)
counter += 1
self.e_min = self.e_max
if self.verbose:
print('Data from {} eV to {} eV saved to the <{}> file.'.format(
self.default_e_min, self.default_e_max, self.outfile))
print('Energy step: {} eV, number of points/chunk: {}, number of chunks {}.'.format(
self.e_step, self.n_points, counter))
def _check_imports(self):
self.available_libs = {
'numpy': None,
'periodictable': None,
'requests': None,
}
for key in self.available_libs.keys():
try:
__import__(key)
setattr(self, key, __import__(key))
self.available_libs[key] = True
except:
self.available_libs[key] = False
def _find_characteristic_value(self):
skiprows = 2
energy_column = 0
characteristic_value_column = 1
error_msg = 'Error! Use energy range from {} to {} eV.'
if self.use_numpy and self.available_libs['numpy']:
if self.data_file:
data = self.numpy.loadtxt(self.data_file, skiprows=skiprows)
self.default_e_min = data[0, energy_column]
self.default_e_max = data[-1, energy_column]
try:
idx_previous = self.numpy.where(data[:, energy_column] <= self.energy)[0][-1]
idx_next = self.numpy.where(data[:, energy_column] > self.energy)[0][0]
except IndexError:
raise Exception(error_msg.format(self.default_e_min, self.default_e_max))
idx = idx_previous if abs(data[idx_previous, energy_column] - self.energy) <= abs(
data[idx_next, energy_column] - self.energy) else idx_next
self.characteristic_value = data[idx][characteristic_value_column]
self.closest_energy = data[idx][energy_column]
else:
raise Exception('Processing with NumPy is only possible with the specified file, not content.')
else:
if not self.content:
with open(self.data_file, 'r') as f:
self.raw_content = f.read()
else:
if type(self.content) != list:
self.raw_content = self.content
self.content = self.raw_content.strip().split('\n')
energies = []
characteristic_values = []
for i in range(skiprows, len(self.content)):
energies.append(float(self.content[i].split()[energy_column]))
characteristic_values.append(float(self.content[i].split()[characteristic_value_column]))
self.default_e_min = energies[0]
self.default_e_max = energies[-1]
indices_previous = []
indices_next = []
try:
for i in range(len(energies)):
if energies[i] <= self.energy:
indices_previous.append(i)
else:
indices_next.append(i)
idx_previous = indices_previous[-1]
idx_next = indices_next[0]
except IndexError:
raise Exception(error_msg.format(self.default_e_min, self.default_e_max))
idx = idx_previous if abs(energies[idx_previous] - self.energy) <= abs(
energies[idx_next] - self.energy) else idx_next
self.characteristic_value = characteristic_values[idx]
self.closest_energy = energies[idx]
if self.characteristic == 'atten':
self.characteristic_value *= 1e-6 # Atten Length (microns)
def _get_remote_file_content(self):
get_url = '{}{}'.format(self.server_info['server'], self.file_name)
r = self.requests.get(get_url)
self.content = r.text
return self.content
def _get_remote_file_name(self, formula=None):
if self.precise:
e_min = self.energy - 1.0
e_max = self.energy + 1.0
else:
e_min = self.e_min
e_max = self.e_max
payload = {
self.server_info[self.characteristic]['fields']['density']: -1,
self.server_info[self.characteristic]['fields']['formula']: formula,
self.server_info[self.characteristic]['fields']['material']: 'Enter Formula',
self.server_info[self.characteristic]['fields']['max']: e_max,
self.server_info[self.characteristic]['fields']['min']: e_min,
self.server_info[self.characteristic]['fields']['npts']: self.n_points,
self.server_info[self.characteristic]['fields']['output']: 'Text File',
self.server_info[self.characteristic]['fields']['scan']: 'Energy',
}
if self.characteristic == 'atten':
payload[self.server_info[self.characteristic]['fields']['fixed']] = 90.0
payload[self.server_info[self.characteristic]['fields']['plot']] = 'Log'
payload[self.server_info[self.characteristic]['fields']['output']] = 'Plot',
elif self.characteristic == 'transmission':
payload[self.server_info[self.characteristic]['fields']['plot']] = 'Linear'
payload[self.server_info[self.characteristic]['fields']['output']] = 'Plot',
payload[self.server_info[self.characteristic]['fields']['thickness']] = self.thickness # um
r = self.requests.post(
'{}{}'.format(self.server_info['server'], self.server_info[self.characteristic]['post_url']),
payload
)
content = r.text
# The file name should be something like '/tmp/xray2565.dat':
try:
self.file_name = str(
content.split('{}='.format(self.server_info[self.characteristic]['file_tag']))[1]
.split('>')[0]
.replace('"', '')
)
except:
raise Exception('\n\nFile name cannot be found! Server response:\n<{}>'.format(content.strip()))
def _request_from_server(self):
if self.available_libs['requests']:
d = []
for f in self.formula.split(','): # to support multiple chemical elements comma-separated list
self._get_remote_file_name(formula=f)
r = self._get_remote_file_content()
d.append(r)
if self.plot or self.save:
df, columns = vis.to_dataframe(d, self.elements)
if df is not None and columns is not None:
file_name = _output_file_name(self.elements, self.characteristic)
if self.plot:
vis.plot_data(
df=df,
elements=self.elements,
property=self.characteristic,
thickness=self.thickness,
e_min=self.e_min,
e_max=self.e_max,
n_points=self.n_points,
file_name=file_name,
x_label=columns[0],
show_plot=self.show_plot,
)
if self.save:
vis.save_to_csv(df=df, file_name=file_name)
else:
msg = 'Cannot use online resource <{}> to get {}. Use local file instead.'
raise Exception(msg.format(self.server_info['server'], self.characteristic))
def _output_file_name(elements, characteristic):
return '{}_{}'.format(','.join(elements), characteristic) if len(elements) > 1 else characteristic | bnlcrl/delta_finder.py | import json
import math
import os
from bnlcrl import visualize as vis
from bnlcrl.utils import convert_types, defaults_file, read_json
parms = defaults_file(suffix='delta')
DAT_DIR = parms['dat_dir']
CONFIG_DIR = parms['config_dir']
DEFAULTS_FILE = parms['defaults_file']
class DeltaFinder:
def __init__(self, **kwargs):
# Check importable libs:
self._check_imports()
# Get input variables:
d = read_json(DEFAULTS_FILE)
self.server_info = d['server_info']
self.parameters = convert_types(d['parameters'])
self.default_e_min = self.parameters['e_min']['type'](self.parameters['e_min']['default'])
self.default_e_max = self.parameters['e_max']['type'](self.parameters['e_max']['default'])
for key, default_val in self.parameters.items():
if key in kwargs.keys():
setattr(self, key, self.parameters[key]['type'](kwargs[key]))
elif not hasattr(self, key) or getattr(self, key) is None:
setattr(self, key, default_val['default'])
self.characteristic_value = None
self.analytical_delta = None
self.closest_energy = None
self.content = None
self.raw_content = None
self.method = None # can be 'file', 'server', 'calculation'
self.output = None
self.elements = self.formula.split(',')
self.element = self.elements[-1]
if self.outfile:
self.save_to_file()
return
if not self.data_file:
self.method = 'server'
self._request_from_server()
else:
self.method = 'file'
self.data_file = os.path.join(DAT_DIR, self.data_file)
if self.calc_delta:
if self.available_libs['periodictable']:
self.method = 'calculation'
self.calculate_delta()
self.characteristic_value = self.analytical_delta
self.closest_energy = self.energy
else:
raise ValueError('"periodictable" library is not available. Install it if you want to use it.')
else:
self._find_characteristic_value()
if self.verbose:
self.print_info()
if self.save_output:
return_dict = {}
for k in d['cli_functions']['find_delta']['returns']:
return_dict[k] = getattr(self, k)
file_name = '{}.json'.format(_output_file_name(self.elements, self.characteristic))
with open(file_name, 'w') as f:
json.dump(return_dict, f)
def calculate_delta(self):
rho = getattr(self.periodictable, self.formula).density
z = getattr(self.periodictable, self.formula).number
mass = getattr(self.periodictable, self.formula).mass
z_over_a = z / mass
wl = 2 * math.pi * 1973 / self.energy # lambda= (2pi (hc))/E
self.analytical_delta = 2.7e-6 * wl ** 2 * rho * z_over_a
def print_info(self):
msg = 'Found {}={} for the closest energy={} eV from {}.'
print(msg.format(self.characteristic, self.characteristic_value, self.closest_energy, self.method))
def save_to_file(self):
self.e_min = self.default_e_min
self.e_max = self.e_min
counter = 0
try:
os.remove(self.outfile)
except:
pass
while self.e_max < self.default_e_max:
self.e_max += self.n_points * self.e_step
if self.e_max > self.default_e_max:
self.e_max = self.default_e_max
self._request_from_server()
if counter > 0:
# Get rid of headers (2 first rows) and the first data row to avoid data overlap:
content = self.content.split('\n')
self.content = '\n'.join(content[3:])
with open(self.outfile, 'a') as f:
f.write(self.content)
counter += 1
self.e_min = self.e_max
if self.verbose:
print('Data from {} eV to {} eV saved to the <{}> file.'.format(
self.default_e_min, self.default_e_max, self.outfile))
print('Energy step: {} eV, number of points/chunk: {}, number of chunks {}.'.format(
self.e_step, self.n_points, counter))
def _check_imports(self):
self.available_libs = {
'numpy': None,
'periodictable': None,
'requests': None,
}
for key in self.available_libs.keys():
try:
__import__(key)
setattr(self, key, __import__(key))
self.available_libs[key] = True
except:
self.available_libs[key] = False
def _find_characteristic_value(self):
skiprows = 2
energy_column = 0
characteristic_value_column = 1
error_msg = 'Error! Use energy range from {} to {} eV.'
if self.use_numpy and self.available_libs['numpy']:
if self.data_file:
data = self.numpy.loadtxt(self.data_file, skiprows=skiprows)
self.default_e_min = data[0, energy_column]
self.default_e_max = data[-1, energy_column]
try:
idx_previous = self.numpy.where(data[:, energy_column] <= self.energy)[0][-1]
idx_next = self.numpy.where(data[:, energy_column] > self.energy)[0][0]
except IndexError:
raise Exception(error_msg.format(self.default_e_min, self.default_e_max))
idx = idx_previous if abs(data[idx_previous, energy_column] - self.energy) <= abs(
data[idx_next, energy_column] - self.energy) else idx_next
self.characteristic_value = data[idx][characteristic_value_column]
self.closest_energy = data[idx][energy_column]
else:
raise Exception('Processing with NumPy is only possible with the specified file, not content.')
else:
if not self.content:
with open(self.data_file, 'r') as f:
self.raw_content = f.read()
else:
if type(self.content) != list:
self.raw_content = self.content
self.content = self.raw_content.strip().split('\n')
energies = []
characteristic_values = []
for i in range(skiprows, len(self.content)):
energies.append(float(self.content[i].split()[energy_column]))
characteristic_values.append(float(self.content[i].split()[characteristic_value_column]))
self.default_e_min = energies[0]
self.default_e_max = energies[-1]
indices_previous = []
indices_next = []
try:
for i in range(len(energies)):
if energies[i] <= self.energy:
indices_previous.append(i)
else:
indices_next.append(i)
idx_previous = indices_previous[-1]
idx_next = indices_next[0]
except IndexError:
raise Exception(error_msg.format(self.default_e_min, self.default_e_max))
idx = idx_previous if abs(energies[idx_previous] - self.energy) <= abs(
energies[idx_next] - self.energy) else idx_next
self.characteristic_value = characteristic_values[idx]
self.closest_energy = energies[idx]
if self.characteristic == 'atten':
self.characteristic_value *= 1e-6 # Atten Length (microns)
def _get_remote_file_content(self):
get_url = '{}{}'.format(self.server_info['server'], self.file_name)
r = self.requests.get(get_url)
self.content = r.text
return self.content
def _get_remote_file_name(self, formula=None):
if self.precise:
e_min = self.energy - 1.0
e_max = self.energy + 1.0
else:
e_min = self.e_min
e_max = self.e_max
payload = {
self.server_info[self.characteristic]['fields']['density']: -1,
self.server_info[self.characteristic]['fields']['formula']: formula,
self.server_info[self.characteristic]['fields']['material']: 'Enter Formula',
self.server_info[self.characteristic]['fields']['max']: e_max,
self.server_info[self.characteristic]['fields']['min']: e_min,
self.server_info[self.characteristic]['fields']['npts']: self.n_points,
self.server_info[self.characteristic]['fields']['output']: 'Text File',
self.server_info[self.characteristic]['fields']['scan']: 'Energy',
}
if self.characteristic == 'atten':
payload[self.server_info[self.characteristic]['fields']['fixed']] = 90.0
payload[self.server_info[self.characteristic]['fields']['plot']] = 'Log'
payload[self.server_info[self.characteristic]['fields']['output']] = 'Plot',
elif self.characteristic == 'transmission':
payload[self.server_info[self.characteristic]['fields']['plot']] = 'Linear'
payload[self.server_info[self.characteristic]['fields']['output']] = 'Plot',
payload[self.server_info[self.characteristic]['fields']['thickness']] = self.thickness # um
r = self.requests.post(
'{}{}'.format(self.server_info['server'], self.server_info[self.characteristic]['post_url']),
payload
)
content = r.text
# The file name should be something like '/tmp/xray2565.dat':
try:
self.file_name = str(
content.split('{}='.format(self.server_info[self.characteristic]['file_tag']))[1]
.split('>')[0]
.replace('"', '')
)
except:
raise Exception('\n\nFile name cannot be found! Server response:\n<{}>'.format(content.strip()))
def _request_from_server(self):
if self.available_libs['requests']:
d = []
for f in self.formula.split(','): # to support multiple chemical elements comma-separated list
self._get_remote_file_name(formula=f)
r = self._get_remote_file_content()
d.append(r)
if self.plot or self.save:
df, columns = vis.to_dataframe(d, self.elements)
if df is not None and columns is not None:
file_name = _output_file_name(self.elements, self.characteristic)
if self.plot:
vis.plot_data(
df=df,
elements=self.elements,
property=self.characteristic,
thickness=self.thickness,
e_min=self.e_min,
e_max=self.e_max,
n_points=self.n_points,
file_name=file_name,
x_label=columns[0],
show_plot=self.show_plot,
)
if self.save:
vis.save_to_csv(df=df, file_name=file_name)
else:
msg = 'Cannot use online resource <{}> to get {}. Use local file instead.'
raise Exception(msg.format(self.server_info['server'], self.characteristic))
def _output_file_name(elements, characteristic):
return '{}_{}'.format(','.join(elements), characteristic) if len(elements) > 1 else characteristic | 0.418222 | 0.19475 |
import logging
from . import mixin
from . import core
from . import Constructs
from .decorators import (
_display_or_return,
_inplace_enabled,
_inplace_enabled_define_and_cleanup,
_manage_log_level_via_verbosity,
)
logger = logging.getLogger(__name__)
class Domain(mixin.FieldDomain, mixin.Container, core.Domain):
"""A domain of the CF data model.
The domain represents a set of discrete "locations" in what
generally would be a multi-dimensional space, either in the real
world or in a model's simulated world. These locations correspond
to individual data array elements of a field construct
The domain is defined collectively by the following constructs of
the CF data model: domain axis, dimension coordinate, auxiliary
coordinate, cell measure, coordinate reference and domain
ancillary constructs.
.. versionadded:: (cfdm) 1.7.0
"""
def __new__(cls, *args, **kwargs):
"""This must be overridden in subclasses.
.. versionadded:: (cfdm) 1.7.0
"""
instance = super().__new__(cls)
instance._Constructs = Constructs
return instance
def __repr__(self):
"""Called by the `repr` built-in function.
x.__repr__() <==> repr(x)
"""
shape = sorted(
[
domain_axis.get_size(None)
for domain_axis in self.domain_axes(todict=True).values()
]
)
shape = str(shape)
shape = shape[1:-1]
return f"<{self.__class__.__name__}: {{{shape}}}>"
def __str__(self):
"""Called by the `str` built-in function.
x.__str__() <==> str(x)
"""
def _print_item(self, cid, variable, axes):
"""Private function called by __str__."""
x = [variable.identity(default=f"key%{cid}")]
if variable.has_data():
shape = [axis_names[axis] for axis in axes]
shape = str(tuple(shape)).replace("'", "")
shape = shape.replace(",)", ")")
x.append(shape)
elif (
variable.construct_type
in ("auxiliary_coordinate", "domain_ancillary")
and variable.has_bounds()
and variable.bounds.has_data()
):
# Construct has no data but it does have bounds
shape = [axis_names[axis] for axis in axes]
shape.extend(
[str(n) for n in variable.bounds.data.shape[len(axes) :]]
)
shape = str(tuple(shape)).replace("'", "")
shape = shape.replace(",)", ")")
x.append(shape)
elif (
hasattr(variable, "nc_get_external")
and variable.nc_get_external()
):
ncvar = variable.nc_get_variable(None)
if ncvar is not None:
x.append(f" (external variable: ncvar%{ncvar}")
else:
x.append(" (external variable)")
if variable.has_data():
x.append(f" = {variable.data}")
elif (
variable.construct_type
in ("auxiliary_coordinate", "domain_ancillary")
and variable.has_bounds()
and variable.bounds.has_data()
):
# Construct has no data but it does have bounds data
x.append(f" = {variable.bounds.data}")
return "".join(x)
string = []
axis_names = self._unique_domain_axis_identities()
construct_data_axes = self.constructs.data_axes()
x = []
dimension_coordinates = self.dimension_coordinates(todict=True)
for axis_cid in sorted(self.domain_axes(todict=True)):
for cid, dim in dimension_coordinates.items():
if construct_data_axes[cid] == (axis_cid,):
name = dim.identity(default=f"key%{0}")
y = "{0}({1})".format(name, dim.get_data().size)
if y != axis_names[axis_cid]:
y = "{0}({1})".format(name, axis_names[axis_cid])
if dim.has_data():
y += " = {0}".format(dim.get_data())
x.append(y)
if x:
x = "\n : ".join(x)
string.append(f"Dimension coords: {x}")
# Auxiliary coordinates
x = [
_print_item(self, cid, v, construct_data_axes[cid])
for cid, v in sorted(
self.auxiliary_coordinates(todict=True).items()
)
]
if x:
x = "\n : ".join(x)
string.append(f"Auxiliary coords: {x}")
# Cell measures
x = [
_print_item(self, cid, v, construct_data_axes[cid])
for cid, v in sorted(self.cell_measures(todict=True).items())
]
if x:
x = "\n : ".join(x)
string.append(f"Cell measures : {x}")
# Coordinate references
x = sorted(
[
str(ref)
for ref in list(
self.coordinate_references(todict=True).values()
)
]
)
if x:
x = "\n : ".join(x)
string.append(f"Coord references: {x}")
# Domain ancillary variables
x = [
_print_item(self, cid, anc, construct_data_axes[cid])
for cid, anc in sorted(
self.domain_ancillaries(todict=True).items()
)
]
if x:
x = "\n : ".join(x)
string.append(f"Domain ancils : {x}")
return "\n".join(string)
@_display_or_return
def _dump_axes(self, axis_names, display=True, _level=0):
"""Returns a string description of the field's domain axes.
:Parameters:
display: `bool`, optional
If False then return the description as a string. By
default the description is printed.
_level: `int`, optional
:Returns:
`str`
A string containing the description.
**Examples:**
"""
indent1 = " " * _level
w = sorted(
[
f"{indent1}Domain Axis: {axis_names[axis]}"
for axis in self.domain_axes(todict=True)
]
)
return "\n".join(w)
def _one_line_description(self, axis_names_sizes=None):
"""Return a one-line description of the domain.
:Returns:
`str`
The description.
"""
if axis_names_sizes is None:
axis_names_sizes = self._unique_domain_axis_identities()
axis_names = ", ".join(sorted(axis_names_sizes.values()))
return f"{self.identity('')}{{{axis_names}}}"
@_inplace_enabled(default=False)
def apply_masking(self, inplace=False):
"""Apply masking as defined by the CF conventions.
Masking is applied to all metadata constructs with data.
Masking is applied according to any of the following criteria
that are applicable:
* where data elements are equal to the value of the
``missing_value`` property;
* where data elements are equal to the value of the
``_FillValue`` property;
* where data elements are strictly less than the value of the
``valid_min`` property;
* where data elements are strictly greater than the value of
the ``valid_max`` property;
* where data elements are within the inclusive range specified
by the two values of ``valid_range`` property.
If any of the above properties have not been set the no
masking is applied for that method.
Elements that are already masked remain so.
.. note:: If using the `apply_masking` method on a construct
that has been read from a dataset with the
``mask=False`` parameter to the `read` function,
then the mask defined in the dataset can only be
recreated if the ``missing_value``, ``_FillValue``,
``valid_min``, ``valid_max``, and ``valid_range``
properties have not been updated.
.. versionadded:: (cfdm) 1.8.9.0
.. seealso:: `{{package}}.Data.apply_masking`, `read`, `write`
:Parameters:
{{inplace: `bool`, optional}}
:Returns:
`Domain` or `None`
A new domain construct with masked values, or `None`
if the operation was in-place.
**Examples:**
>>> d = cfdm.example_field(0).domain
>>> x = d.construct('longitude')
>>> x.data[[0, -1]] = cfdm.masked
>>> print(x.data.array)
[-- 67.5 112.5 157.5 202.5 247.5 292.5 --]
>>> cfdm.write(d, 'masked.nc')
>>> no_mask = {{package}}.read('masked.nc', domain=True, mask=False)[0]
>>> no_mask_x = no_mask.construct('longitude')
>>> print(no_mask_x.data.array)
[9.96920997e+36 6.75000000e+01 1.12500000e+02 1.57500000e+02
2.02500000e+02 2.47500000e+02 2.92500000e+02 9.96920997e+36]
>>> masked = no_mask.apply_masking()
>>> masked_x = masked.construct('longitude')
>>> print(masked_x.data.array)
[-- 67.5 112.5 157.5 202.5 247.5
"""
d = _inplace_enabled_define_and_cleanup(self)
# Apply masking to the metadata constructs
d._apply_masking_constructs()
return d
def climatological_time_axes(self):
"""Return all axes which are climatological time axes.
This is ascertained by inspecting the values returned by each
coordinate construct's `is_climatology` method.
.. versionadded:: (cfdm) 1.8.9.0
:Returns:
`set`
The keys of the domain axis constructs that are
climatological time axes.
**Examples:**
>>> d = cfdm.example_field(0)
>>> d.climatological_time_axes()
set()
"""
data_axes = self.constructs.data_axes()
out = []
for ckey, c in self.coordinates(todict=True).items():
if not c.is_climatology():
continue
out.extend(data_axes.get(ckey, ()))
return set(out)
    @_display_or_return
    def dump(self, display=True, _level=0, _title=None):
        """A full description of the domain.

        The domain components are described without abbreviation with the
        exception of data arrays, which are abbreviated to their first and
        last values.

        .. versionadded:: (cfdm) 1.7.0

        :Parameters:

            display: `bool`, optional
                If False then return the description as a string. By
                default the description is printed.

                *Parameter example:*
                  ``f.dump()`` is equivalent to ``print
                  f.dump(display=False)``.

        :Returns:

            `str` or `None`
                If *display* is True then the description is printed and
                `None` is returned. Otherwise the description is returned
                as a string.

        """
        # Human-readable, domain-unique names for axes and constructs
        axis_to_name = self._unique_domain_axis_identities()
        construct_name = self._unique_construct_names()
        construct_data_axes = self.constructs.data_axes()
        string = []
        # Domain axes
        axes = self._dump_axes(axis_to_name, display=False, _level=_level)
        if axes:
            string.append(axes)
        # Dimension coordinates.  The "" entries below render as blank
        # separator lines once the list is joined with newlines.
        dimension_coordinates = self.dimension_coordinates(todict=True)
        for cid, value in sorted(dimension_coordinates.items()):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _level=_level,
                    _title=f"Dimension coordinate: {construct_name[cid]}",
                    _axes=construct_data_axes[cid],
                    _axis_names=axis_to_name,
                )
            )
        # Auxiliary coordinates
        auxiliary_coordinates = self.auxiliary_coordinates(todict=True)
        for cid, value in sorted(auxiliary_coordinates.items()):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _level=_level,
                    _title=f"Auxiliary coordinate: {construct_name[cid]}",
                    _axes=construct_data_axes[cid],
                    _axis_names=axis_to_name,
                )
            )
        # Domain ancillaries
        for cid, value in sorted(self.domain_ancillaries(todict=True).items()):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _level=_level,
                    _title=f"Domain ancillary: {construct_name[cid]}",
                    _axes=construct_data_axes[cid],
                    _axis_names=axis_to_name,
                )
            )
        # Coordinate references.  tuple(...) over the todict results
        # passes just the coordinate construct keys.
        for cid, value in sorted(
            self.coordinate_references(todict=True).items()
        ):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _level=_level,
                    _title=f"Coordinate reference: {construct_name[cid]}",
                    _construct_names=construct_name,
                    _auxiliary_coordinates=tuple(auxiliary_coordinates),
                    _dimension_coordinates=tuple(dimension_coordinates),
                )
            )
        # Cell measures
        for cid, value in sorted(self.cell_measures(todict=True).items()):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _key=cid,
                    _level=_level,
                    _title=f"Cell measure: {construct_name[cid]}",
                    _axes=construct_data_axes[cid],
                    _axis_names=axis_to_name,
                )
            )
        # Trailing blank line
        string.append("")
        return "\n".join(string)
@_manage_log_level_via_verbosity
def equals(
self,
other,
rtol=None,
atol=None,
verbose=None,
ignore_data_type=False,
ignore_fill_value=False,
ignore_compression=True,
ignore_type=False,
):
"""Whether two domains are the same.
.. versionadded:: (cfdm) 1.7.0
:Returns:
`bool`
**Examples:**
>>> d.equals(d)
True
>>> d.equals(d.copy())
True
>>> d.equals('not a domain')
False
"""
pp = super()._equals_preprocess(
other, verbose=verbose, ignore_type=ignore_type
)
if pp is True or pp is False:
return pp
other = pp
# ------------------------------------------------------------
# Check the constructs
# ------------------------------------------------------------
if not self._equals(
self.constructs,
other.constructs,
rtol=rtol,
atol=atol,
verbose=verbose,
ignore_data_type=ignore_data_type,
ignore_fill_value=ignore_fill_value,
ignore_compression=ignore_compression,
):
logger.info(
f"{self.__class__.__name__}: Different metadata constructs"
)
return False
return True
def get_filenames(self):
"""Return the file names containing the metadata construct data.
:Returns:
`set`
The file names in normalized, absolute form. If all of
the data are in memory then an empty `set` is
returned.
**Examples:**
>>> d = {{package}}.example_field(0).domain
>>> {{package}}.write(d, 'temp_file.nc')
>>> e = {{package}}.read('temp_file.nc', domain=True)[0]
>>> e.get_filenames()
{'temp_file.nc'}
"""
out = set()
for c in self.constructs.filter_by_data().values():
out.update(c.get_filenames())
return out | cfdm/domain.py | import logging
from . import mixin
from . import core
from . import Constructs
from .decorators import (
_display_or_return,
_inplace_enabled,
_inplace_enabled_define_and_cleanup,
_manage_log_level_via_verbosity,
)
logger = logging.getLogger(__name__)
class Domain(mixin.FieldDomain, mixin.Container, core.Domain):
    """A domain of the CF data model.

    The domain represents a set of discrete "locations" in what
    generally would be a multi-dimensional space, either in the real
    world or in a model's simulated world. These locations correspond
    to individual data array elements of a field construct.

    The domain is defined collectively by the following constructs of
    the CF data model: domain axis, dimension coordinate, auxiliary
    coordinate, cell measure, coordinate reference and domain
    ancillary constructs.

    .. versionadded:: (cfdm) 1.7.0

    """

    def __new__(cls, *args, **kwargs):
        """This must be overridden in subclasses.

        .. versionadded:: (cfdm) 1.7.0

        """
        instance = super().__new__(cls)
        # Record which Constructs class the instance uses to store its
        # metadata constructs; subclasses override by replacing this.
        instance._Constructs = Constructs
        return instance
def __repr__(self):
"""Called by the `repr` built-in function.
x.__repr__() <==> repr(x)
"""
shape = sorted(
[
domain_axis.get_size(None)
for domain_axis in self.domain_axes(todict=True).values()
]
)
shape = str(shape)
shape = shape[1:-1]
return f"<{self.__class__.__name__}: {{{shape}}}>"
def __str__(self):
"""Called by the `str` built-in function.
x.__str__() <==> str(x)
"""
def _print_item(self, cid, variable, axes):
"""Private function called by __str__."""
x = [variable.identity(default=f"key%{cid}")]
if variable.has_data():
shape = [axis_names[axis] for axis in axes]
shape = str(tuple(shape)).replace("'", "")
shape = shape.replace(",)", ")")
x.append(shape)
elif (
variable.construct_type
in ("auxiliary_coordinate", "domain_ancillary")
and variable.has_bounds()
and variable.bounds.has_data()
):
# Construct has no data but it does have bounds
shape = [axis_names[axis] for axis in axes]
shape.extend(
[str(n) for n in variable.bounds.data.shape[len(axes) :]]
)
shape = str(tuple(shape)).replace("'", "")
shape = shape.replace(",)", ")")
x.append(shape)
elif (
hasattr(variable, "nc_get_external")
and variable.nc_get_external()
):
ncvar = variable.nc_get_variable(None)
if ncvar is not None:
x.append(f" (external variable: ncvar%{ncvar}")
else:
x.append(" (external variable)")
if variable.has_data():
x.append(f" = {variable.data}")
elif (
variable.construct_type
in ("auxiliary_coordinate", "domain_ancillary")
and variable.has_bounds()
and variable.bounds.has_data()
):
# Construct has no data but it does have bounds data
x.append(f" = {variable.bounds.data}")
return "".join(x)
string = []
axis_names = self._unique_domain_axis_identities()
construct_data_axes = self.constructs.data_axes()
x = []
dimension_coordinates = self.dimension_coordinates(todict=True)
for axis_cid in sorted(self.domain_axes(todict=True)):
for cid, dim in dimension_coordinates.items():
if construct_data_axes[cid] == (axis_cid,):
name = dim.identity(default=f"key%{0}")
y = "{0}({1})".format(name, dim.get_data().size)
if y != axis_names[axis_cid]:
y = "{0}({1})".format(name, axis_names[axis_cid])
if dim.has_data():
y += " = {0}".format(dim.get_data())
x.append(y)
if x:
x = "\n : ".join(x)
string.append(f"Dimension coords: {x}")
# Auxiliary coordinates
x = [
_print_item(self, cid, v, construct_data_axes[cid])
for cid, v in sorted(
self.auxiliary_coordinates(todict=True).items()
)
]
if x:
x = "\n : ".join(x)
string.append(f"Auxiliary coords: {x}")
# Cell measures
x = [
_print_item(self, cid, v, construct_data_axes[cid])
for cid, v in sorted(self.cell_measures(todict=True).items())
]
if x:
x = "\n : ".join(x)
string.append(f"Cell measures : {x}")
# Coordinate references
x = sorted(
[
str(ref)
for ref in list(
self.coordinate_references(todict=True).values()
)
]
)
if x:
x = "\n : ".join(x)
string.append(f"Coord references: {x}")
# Domain ancillary variables
x = [
_print_item(self, cid, anc, construct_data_axes[cid])
for cid, anc in sorted(
self.domain_ancillaries(todict=True).items()
)
]
if x:
x = "\n : ".join(x)
string.append(f"Domain ancils : {x}")
return "\n".join(string)
@_display_or_return
def _dump_axes(self, axis_names, display=True, _level=0):
"""Returns a string description of the field's domain axes.
:Parameters:
display: `bool`, optional
If False then return the description as a string. By
default the description is printed.
_level: `int`, optional
:Returns:
`str`
A string containing the description.
**Examples:**
"""
indent1 = " " * _level
w = sorted(
[
f"{indent1}Domain Axis: {axis_names[axis]}"
for axis in self.domain_axes(todict=True)
]
)
return "\n".join(w)
def _one_line_description(self, axis_names_sizes=None):
"""Return a one-line description of the domain.
:Returns:
`str`
The description.
"""
if axis_names_sizes is None:
axis_names_sizes = self._unique_domain_axis_identities()
axis_names = ", ".join(sorted(axis_names_sizes.values()))
return f"{self.identity('')}{{{axis_names}}}"
    @_inplace_enabled(default=False)
    def apply_masking(self, inplace=False):
        """Apply masking as defined by the CF conventions.

        Masking is applied to all metadata constructs with data.

        Masking is applied according to any of the following criteria
        that are applicable:

        * where data elements are equal to the value of the
          ``missing_value`` property;

        * where data elements are equal to the value of the
          ``_FillValue`` property;

        * where data elements are strictly less than the value of the
          ``valid_min`` property;

        * where data elements are strictly greater than the value of
          the ``valid_max`` property;

        * where data elements are within the inclusive range specified
          by the two values of ``valid_range`` property.

        If any of the above properties have not been set then no
        masking is applied for that criterion.

        Elements that are already masked remain so.

        .. note:: If using the `apply_masking` method on a construct
                  that has been read from a dataset with the
                  ``mask=False`` parameter to the `read` function,
                  then the mask defined in the dataset can only be
                  recreated if the ``missing_value``, ``_FillValue``,
                  ``valid_min``, ``valid_max``, and ``valid_range``
                  properties have not been updated.

        .. versionadded:: (cfdm) 1.8.9.0

        .. seealso:: `{{package}}.Data.apply_masking`, `read`, `write`

        :Parameters:

            {{inplace: `bool`, optional}}

        :Returns:

            `Domain` or `None`
                A new domain construct with masked values, or `None`
                if the operation was in-place.

        **Examples:**

        >>> d = cfdm.example_field(0).domain
        >>> x = d.construct('longitude')
        >>> x.data[[0, -1]] = cfdm.masked
        >>> print(x.data.array)
        [-- 67.5 112.5 157.5 202.5 247.5 292.5 --]
        >>> cfdm.write(d, 'masked.nc')
        >>> no_mask = {{package}}.read('masked.nc', domain=True, mask=False)[0]
        >>> no_mask_x = no_mask.construct('longitude')
        >>> print(no_mask_x.data.array)
        [9.96920997e+36 6.75000000e+01 1.12500000e+02 1.57500000e+02
         2.02500000e+02 2.47500000e+02 2.92500000e+02 9.96920997e+36]
        >>> masked = no_mask.apply_masking()
        >>> masked_x = masked.construct('longitude')
        >>> print(masked_x.data.array)
        [-- 67.5 112.5 157.5 202.5 247.5 292.5 --]

        """
        # The decorator makes this either self (in-place) or a deep
        # copy, according to the *inplace* argument.
        d = _inplace_enabled_define_and_cleanup(self)
        # Apply masking to the metadata constructs
        d._apply_masking_constructs()
        return d
def climatological_time_axes(self):
"""Return all axes which are climatological time axes.
This is ascertained by inspecting the values returned by each
coordinate construct's `is_climatology` method.
.. versionadded:: (cfdm) 1.8.9.0
:Returns:
`set`
The keys of the domain axis constructs that are
climatological time axes.
**Examples:**
>>> d = cfdm.example_field(0)
>>> d.climatological_time_axes()
set()
"""
data_axes = self.constructs.data_axes()
out = []
for ckey, c in self.coordinates(todict=True).items():
if not c.is_climatology():
continue
out.extend(data_axes.get(ckey, ()))
return set(out)
    @_display_or_return
    def dump(self, display=True, _level=0, _title=None):
        """A full description of the domain.

        The domain components are described without abbreviation with the
        exception of data arrays, which are abbreviated to their first and
        last values.

        .. versionadded:: (cfdm) 1.7.0

        :Parameters:

            display: `bool`, optional
                If False then return the description as a string. By
                default the description is printed.

                *Parameter example:*
                  ``f.dump()`` is equivalent to ``print
                  f.dump(display=False)``.

        :Returns:

            `str` or `None`
                If *display* is True then the description is printed and
                `None` is returned. Otherwise the description is returned
                as a string.

        """
        # Human-readable, domain-unique names for axes and constructs
        axis_to_name = self._unique_domain_axis_identities()
        construct_name = self._unique_construct_names()
        construct_data_axes = self.constructs.data_axes()
        string = []
        # Domain axes
        axes = self._dump_axes(axis_to_name, display=False, _level=_level)
        if axes:
            string.append(axes)
        # Dimension coordinates.  The "" entries below render as blank
        # separator lines once the list is joined with newlines.
        dimension_coordinates = self.dimension_coordinates(todict=True)
        for cid, value in sorted(dimension_coordinates.items()):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _level=_level,
                    _title=f"Dimension coordinate: {construct_name[cid]}",
                    _axes=construct_data_axes[cid],
                    _axis_names=axis_to_name,
                )
            )
        # Auxiliary coordinates
        auxiliary_coordinates = self.auxiliary_coordinates(todict=True)
        for cid, value in sorted(auxiliary_coordinates.items()):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _level=_level,
                    _title=f"Auxiliary coordinate: {construct_name[cid]}",
                    _axes=construct_data_axes[cid],
                    _axis_names=axis_to_name,
                )
            )
        # Domain ancillaries
        for cid, value in sorted(self.domain_ancillaries(todict=True).items()):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _level=_level,
                    _title=f"Domain ancillary: {construct_name[cid]}",
                    _axes=construct_data_axes[cid],
                    _axis_names=axis_to_name,
                )
            )
        # Coordinate references.  tuple(...) over the todict results
        # passes just the coordinate construct keys.
        for cid, value in sorted(
            self.coordinate_references(todict=True).items()
        ):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _level=_level,
                    _title=f"Coordinate reference: {construct_name[cid]}",
                    _construct_names=construct_name,
                    _auxiliary_coordinates=tuple(auxiliary_coordinates),
                    _dimension_coordinates=tuple(dimension_coordinates),
                )
            )
        # Cell measures
        for cid, value in sorted(self.cell_measures(todict=True).items()):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _key=cid,
                    _level=_level,
                    _title=f"Cell measure: {construct_name[cid]}",
                    _axes=construct_data_axes[cid],
                    _axis_names=axis_to_name,
                )
            )
        # Trailing blank line
        string.append("")
        return "\n".join(string)
@_manage_log_level_via_verbosity
def equals(
self,
other,
rtol=None,
atol=None,
verbose=None,
ignore_data_type=False,
ignore_fill_value=False,
ignore_compression=True,
ignore_type=False,
):
"""Whether two domains are the same.
.. versionadded:: (cfdm) 1.7.0
:Returns:
`bool`
**Examples:**
>>> d.equals(d)
True
>>> d.equals(d.copy())
True
>>> d.equals('not a domain')
False
"""
pp = super()._equals_preprocess(
other, verbose=verbose, ignore_type=ignore_type
)
if pp is True or pp is False:
return pp
other = pp
# ------------------------------------------------------------
# Check the constructs
# ------------------------------------------------------------
if not self._equals(
self.constructs,
other.constructs,
rtol=rtol,
atol=atol,
verbose=verbose,
ignore_data_type=ignore_data_type,
ignore_fill_value=ignore_fill_value,
ignore_compression=ignore_compression,
):
logger.info(
f"{self.__class__.__name__}: Different metadata constructs"
)
return False
return True
def get_filenames(self):
"""Return the file names containing the metadata construct data.
:Returns:
`set`
The file names in normalized, absolute form. If all of
the data are in memory then an empty `set` is
returned.
**Examples:**
>>> d = {{package}}.example_field(0).domain
>>> {{package}}.write(d, 'temp_file.nc')
>>> e = {{package}}.read('temp_file.nc', domain=True)[0]
>>> e.get_filenames()
{'temp_file.nc'}
"""
out = set()
for c in self.constructs.filter_by_data().values():
out.update(c.get_filenames())
return out | 0.830525 | 0.406037 |
from unittest import TestCase, main
from tempfile import mkstemp
from os import close, remove
from os.path import exists
import pandas as pd
from qiita_core.util import qiita_test_checker
import qiita_db as qdb
@qiita_test_checker()
class TestSQL(TestCase):
    """Tests that the database triggers and procedures work properly."""

    def setUp(self):
        # Paths created during a test; removed again in tearDown.
        self._files_to_remove = []

    def tearDown(self):
        for fp in self._files_to_remove:
            if exists(fp):
                remove(fp)

    def test_find_artifact_roots_is_root(self):
        """Correctly returns the root if the artifact is already the root"""
        with qdb.sql_connection.TRN:
            sql = "SELECT * FROM qiita.find_artifact_roots(%s)"
            qdb.sql_connection.TRN.add(sql, [1])
            obs = qdb.sql_connection.TRN.execute_fetchindex()
            exp = [[1]]
            self.assertEqual(obs, exp)

    def test_find_artifact_roots_is_child(self):
        """Correctly returns the root if the artifact is a child"""
        with qdb.sql_connection.TRN:
            sql = "SELECT * FROM qiita.find_artifact_roots(%s)"
            qdb.sql_connection.TRN.add(sql, [4])
            obs = qdb.sql_connection.TRN.execute_fetchindex()
            exp = [[1]]
            self.assertEqual(obs, exp)

    def test_find_artifact_roots_is_child_multiple_parents_one_root(self):
        """Correctly returns the roots if the children has multiple parents
        but a single root
        """
        # Create a dummy BIOM file to attach to the new artifact
        fd, fp = mkstemp(suffix='_table.biom')
        close(fd)
        self._files_to_remove.append(fp)
        with open(fp, 'w') as f:
            f.write("test")
        fp = [(fp, 7)]
        params = qdb.software.Parameters.from_default_params(
            qdb.software.DefaultParameters(10), {'input_data': 2})
        new = qdb.artifact.Artifact.create(
            fp, "BIOM",
            parents=[qdb.artifact.Artifact(2), qdb.artifact.Artifact(3)],
            processing_parameters=params)
        self._files_to_remove.extend([x['fp'] for x in new.filepaths])
        with qdb.sql_connection.TRN:
            sql = "SELECT * FROM qiita.find_artifact_roots(%s)"
            qdb.sql_connection.TRN.add(sql, [new.id])
            obs = qdb.sql_connection.TRN.execute_fetchindex()
            exp = [[1]]
            self.assertEqual(obs, exp)

    def _create_root_artifact(self):
        """Creates a new root artifact (no parents) from a fresh prep
        template"""
        metadata = pd.DataFrame.from_dict(
            {'SKB8.640193': {'center_name': 'ANL',
                             'primer': 'GTGCCAGCMGCCGCGGTAA',
                             'barcode': 'GTCCGCAAGTTA',
                             'run_prefix': "s_G1_L001_sequences",
                             'platform': 'Illumina',
                             'target_gene': '16S rRNA',
                             'target_subfragment': 'V4',
                             'instrument_model': 'Illumina MiSeq',
                             'library_construction_protocol': 'AAAA',
                             'experiment_design_description': 'BBBB'}},
            orient='index', dtype=str)
        pt = qdb.metadata_template.prep_template.PrepTemplate.create(
            metadata, qdb.study.Study(1), "18S")
        fd, fp = mkstemp(suffix='_seqs.fastq')
        close(fd)
        self._files_to_remove.append(fp)
        with open(fp, 'w') as f:
            f.write("test")
        fp = [(fp, 1)]
        new_root = qdb.artifact.Artifact.create(fp, "FASTQ", prep_template=pt)
        self._files_to_remove.extend([x['fp'] for x in new_root.filepaths])
        return new_root

    def _create_child_artifact(self, parents):
        """Creates a new artifact with the given parents"""
        # NOTE(review): unlike _create_root_artifact, the created
        # artifact's filepaths are not scheduled for removal — confirm
        # whether that is intentional.
        # Add a child of 2 roots
        fd, fp = mkstemp(suffix='_seqs.fna')
        close(fd)
        self._files_to_remove.append(fp)
        with open(fp, 'w') as f:
            f.write("test")
        fp = [(fp, 4)]
        params = qdb.software.Parameters.from_default_params(
            qdb.software.DefaultParameters(1), {'input_data': 2})
        new = qdb.artifact.Artifact.create(
            fp, "Demultiplexed", parents=parents,
            processing_parameters=params)
        return new

    def test_find_artifact_roots_is_root_without_children(self):
        """Correctly returns the root if the artifact is already the root
        and doesn't have any children
        """
        sql = "SELECT * FROM qiita.find_artifact_roots(%s)"
        # Add a new root
        new_root = self._create_root_artifact()
        with qdb.sql_connection.TRN:
            qdb.sql_connection.TRN.add(sql, [new_root.id])
            obs = qdb.sql_connection.TRN.execute_fetchindex()
            exp = [[new_root.id]]
            self.assertEqual(obs, exp)

    def test_find_artifact_roots_is_child_multiple_parents_multiple_root(self):
        """Correctly returns the roots if the children has multiple roots"""
        sql = "SELECT * FROM qiita.find_artifact_roots(%s)"
        new_root = self._create_root_artifact()
        # Add a child of 2 roots
        fd, fp = mkstemp(suffix='_seqs.fna')
        close(fd)
        self._files_to_remove.append(fp)
        with open(fp, 'w') as f:
            f.write("test")
        fp = [(fp, 4)]
        params = qdb.software.Parameters.from_default_params(
            qdb.software.DefaultParameters(1), {'input_data': 2})
        new = qdb.artifact.Artifact.create(
            fp, "Demultiplexed", parents=[qdb.artifact.Artifact(1), new_root],
            processing_parameters=params)
        self._files_to_remove.extend([x['fp'] for x in new.filepaths])
        with qdb.sql_connection.TRN:
            qdb.sql_connection.TRN.add(sql, [new.id])
            obs = qdb.sql_connection.TRN.execute_fetchindex()
            exp = [[1], [new_root.id]]
            self.assertCountEqual(obs, exp)

    def test_artifact_ancestry_root(self):
        """Correctly returns the ancestry of a root artifact"""
        sql = "SELECT * FROM qiita.artifact_ancestry(%s)"
        with qdb.sql_connection.TRN:
            qdb.sql_connection.TRN.add(sql, [1])
            obs = qdb.sql_connection.TRN.execute_fetchindex()
            exp = []
            self.assertEqual(obs, exp)

    def test_artifact_ancestry_leaf(self):
        """Correctly returns the ancestry of a leaf artifact"""
        with qdb.sql_connection.TRN:
            sql = "SELECT * FROM qiita.artifact_ancestry(%s)"
            qdb.sql_connection.TRN.add(sql, [4])
            obs = qdb.sql_connection.TRN.execute_fetchindex()
            exp = [[4, 2], [2, 1]]
            self.assertCountEqual(obs, exp)

    def test_artifact_ancestry_leaf_multiple_parents(self):
        """Correctly returns the ancestry of a leaf artifact w multiple parents
        """
        root = self._create_root_artifact()
        parent1 = self._create_child_artifact([root])
        parent2 = self._create_child_artifact([root])
        child = self._create_child_artifact([parent1, parent2])
        with qdb.sql_connection.TRN:
            sql = "SELECT * FROM qiita.artifact_ancestry(%s)"
            qdb.sql_connection.TRN.add(sql, [child.id])
            obs = qdb.sql_connection.TRN.execute_fetchindex()
            exp = [[child.id, parent1.id], [child.id, parent2.id],
                   [parent1.id, root.id], [parent2.id, root.id]]
            self.assertCountEqual(obs, exp)

    def test_artifact_ancestry_middle(self):
        """Correctly returns the ancestry of an artifact in the middle of the
        DAG"""
        with qdb.sql_connection.TRN:
            sql = "SELECT * FROM qiita.artifact_ancestry(%s)"
            qdb.sql_connection.TRN.add(sql, [2])
            obs = qdb.sql_connection.TRN.execute_fetchindex()
            exp = [[2, 1]]
            self.assertEqual(obs, exp)

    def test_artifact_descendants_leaf(self):
        """Correctly returns the descendants of a leaf artifact"""
        with qdb.sql_connection.TRN:
            sql = "SELECT * FROM qiita.artifact_descendants(%s)"
            qdb.sql_connection.TRN.add(sql, [4])
            obs = qdb.sql_connection.TRN.execute_fetchindex()
            exp = []
            self.assertEqual(obs, exp)

    def test_artifact_descendants_root(self):
        """Correctly returns the descendants of a root artifact"""
        with qdb.sql_connection.TRN:
            sql = "SELECT * FROM qiita.artifact_descendants(%s)"
            qdb.sql_connection.TRN.add(sql, [1])
            obs = qdb.sql_connection.TRN.execute_fetchindex()
            exp = [[2, 1], [3, 1], [4, 2], [5, 2], [6, 2]]
            self.assertCountEqual(obs, exp)

    def test_artifact_descendants_middle(self):
        """Correctly returns the descendants of an artifact in the middle of
        the DAG"""
        with qdb.sql_connection.TRN:
            sql = "SELECT * FROM qiita.artifact_descendants(%s)"
            qdb.sql_connection.TRN.add(sql, [2])
            obs = qdb.sql_connection.TRN.execute_fetchindex()
            exp = [[4, 2], [5, 2], [6, 2]]
            self.assertCountEqual(obs, exp)

    def test_isnumeric(self):
        """Test SQL function isnumeric"""
        exp = [['', False], ['.', False], ['.0', True], ['0.', True],
               ['0', True], ['1', True], ['123', True], ['123.456', True],
               ['abc', False], ['1..2', False], ['1.2.3.4', False],
               ['1x234', False], ['1.234e-5', True]]
        sql = ("WITH test(x) AS ("
               "VALUES (''), ('.'), ('.0'), ('0.'), ('0'), ('1'), ('123'), "
               "('123.456'), ('abc'), ('1..2'), ('1.2.3.4'), ('1x234'), "
               "('1.234e-5')) SELECT x, isnumeric(x) FROM test;")
        with qdb.sql_connection.TRN:
            qdb.sql_connection.TRN.add(sql)
            obs = qdb.sql_connection.TRN.execute_fetchindex()
            self.assertEqual(exp, obs)

    def test_artifact_descendants_with_jobs(self):
        """Test SQL function artifact_descendants_with_jobs"""
        exp = [['c350b068-add7-49a5-8846-604ac032cc88', 1, 2],
               ['d883dab4-503b-45c2-815d-2126ff52dede', 1, 3],
               ['a4c4b9b9-20ca-47f5-bd30-725cce71df2b', 2, 4],
               ['624dce65-43a5-4156-a4b6-6c1d02114b67', 2, 5],
               ['81bbe8d0-b4c2-42eb-ada9-f07c1c91e59f', 2, 6]]
        sql = """SELECT * FROM qiita.artifact_descendants_with_jobs(1)"""
        with qdb.sql_connection.TRN:
            qdb.sql_connection.TRN.add(sql)
            obs = qdb.sql_connection.TRN.execute_fetchindex()
            # looping on the results so as not to test the job id, which
            # is randomly generated
            for e, o in zip(exp, obs):
                self.assertEqual(e[1:], o[1:])
# Run the test suite when this module is executed directly.  (Restores
# the clean guard; the dumped line had dataset residue fused onto it.)
if __name__ == '__main__':
    main()
from unittest import TestCase, main
from tempfile import mkstemp
from os import close, remove
from os.path import exists
import pandas as pd
from qiita_core.util import qiita_test_checker
import qiita_db as qdb
@qiita_test_checker()
class TestSQL(TestCase):
"""Tests that the database triggers and procedures work properly"""
def setUp(self):
self._files_to_remove = []
def tearDown(self):
for fp in self._files_to_remove:
if exists(fp):
remove(fp)
def test_find_artifact_roots_is_root(self):
"""Correctly returns the root if the artifact is already the root"""
with qdb.sql_connection.TRN:
sql = "SELECT * FROM qiita.find_artifact_roots(%s)"
qdb.sql_connection.TRN.add(sql, [1])
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = [[1]]
self.assertEqual(obs, exp)
def test_find_artifact_roots_is_child(self):
"""Correctly returns the root if the artifact is a child"""
with qdb.sql_connection.TRN:
sql = "SELECT * FROM qiita.find_artifact_roots(%s)"
qdb.sql_connection.TRN.add(sql, [4])
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = [[1]]
self.assertEqual(obs, exp)
def test_find_artifact_roots_is_child_multiple_parents_one_root(self):
"""Correctly returns the roots if the children has multiple parents
but a single root
"""
fd, fp = mkstemp(suffix='_table.biom')
close(fd)
self._files_to_remove.append(fp)
with open(fp, 'w') as f:
f.write("test")
fp = [(fp, 7)]
params = qdb.software.Parameters.from_default_params(
qdb.software.DefaultParameters(10), {'input_data': 2})
new = qdb.artifact.Artifact.create(
fp, "BIOM",
parents=[qdb.artifact.Artifact(2), qdb.artifact.Artifact(3)],
processing_parameters=params)
self._files_to_remove.extend([x['fp'] for x in new.filepaths])
with qdb.sql_connection.TRN:
sql = "SELECT * FROM qiita.find_artifact_roots(%s)"
qdb.sql_connection.TRN.add(sql, [new.id])
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = [[1]]
self.assertEqual(obs, exp)
def _create_root_artifact(self):
"""Creates a new root artifact"""
metadata = pd.DataFrame.from_dict(
{'SKB8.640193': {'center_name': 'ANL',
'primer': 'GTGCCAGCMGCCGCGGTAA',
'barcode': 'GTCCGCAAGTTA',
'run_prefix': "s_G1_L001_sequences",
'platform': 'Illumina',
'target_gene': '16S rRNA',
'target_subfragment': 'V4',
'instrument_model': 'Illumina MiSeq',
'library_construction_protocol': 'AAAA',
'experiment_design_description': 'BBBB'}},
orient='index', dtype=str)
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
metadata, qdb.study.Study(1), "18S")
fd, fp = mkstemp(suffix='_seqs.fastq')
close(fd)
self._files_to_remove.append(fp)
with open(fp, 'w') as f:
f.write("test")
fp = [(fp, 1)]
new_root = qdb.artifact.Artifact.create(fp, "FASTQ", prep_template=pt)
self._files_to_remove.extend([x['fp'] for x in new_root.filepaths])
return new_root
def _create_child_artifact(self, parents):
"""Creates a new artifact with the given parents"""
# Add a child of 2 roots
fd, fp = mkstemp(suffix='_seqs.fna')
close(fd)
self._files_to_remove.append(fp)
with open(fp, 'w') as f:
f.write("test")
fp = [(fp, 4)]
params = qdb.software.Parameters.from_default_params(
qdb.software.DefaultParameters(1), {'input_data': 2})
new = qdb.artifact.Artifact.create(
fp, "Demultiplexed", parents=parents,
processing_parameters=params)
return new
def test_find_artifact_roots_is_root_without_children(self):
"""Correctly returns the root if the artifact is already the root
and doesn't have any children
"""
sql = "SELECT * FROM qiita.find_artifact_roots(%s)"
# Add a new root
new_root = self._create_root_artifact()
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(sql, [new_root.id])
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = [[new_root.id]]
self.assertEqual(obs, exp)
def test_find_artifact_roots_is_child_multiple_parents_multiple_root(self):
"""Correctly returns the roots if the children has multiple roots"""
sql = "SELECT * FROM qiita.find_artifact_roots(%s)"
new_root = self._create_root_artifact()
# Add a child of 2 roots
fd, fp = mkstemp(suffix='_seqs.fna')
close(fd)
self._files_to_remove.append(fp)
with open(fp, 'w') as f:
f.write("test")
fp = [(fp, 4)]
params = qdb.software.Parameters.from_default_params(
qdb.software.DefaultParameters(1), {'input_data': 2})
new = qdb.artifact.Artifact.create(
fp, "Demultiplexed", parents=[qdb.artifact.Artifact(1), new_root],
processing_parameters=params)
self._files_to_remove.extend([x['fp'] for x in new.filepaths])
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(sql, [new.id])
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = [[1], [new_root.id]]
self.assertCountEqual(obs, exp)
def test_artifact_ancestry_root(self):
"""Correctly returns the ancestry of a root artifact"""
sql = "SELECT * FROM qiita.artifact_ancestry(%s)"
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(sql, [1])
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = []
self.assertEqual(obs, exp)
def test_artifact_ancestry_leaf(self):
"""Correctly returns the ancestry of a leaf artifact"""
with qdb.sql_connection.TRN:
sql = "SELECT * FROM qiita.artifact_ancestry(%s)"
qdb.sql_connection.TRN.add(sql, [4])
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = [[4, 2], [2, 1]]
self.assertCountEqual(obs, exp)
def test_artifact_ancestry_leaf_multiple_parents(self):
"""Correctly returns the ancestry of a leaf artifact w multiple parents
"""
root = self._create_root_artifact()
parent1 = self._create_child_artifact([root])
parent2 = self._create_child_artifact([root])
child = self._create_child_artifact([parent1, parent2])
with qdb.sql_connection.TRN:
sql = "SELECT * FROM qiita.artifact_ancestry(%s)"
qdb.sql_connection.TRN.add(sql, [child.id])
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = [[child.id, parent1.id], [child.id, parent2.id],
[parent1.id, root.id], [parent2.id, root.id]]
self.assertCountEqual(obs, exp)
def test_artifact_ancestry_middle(self):
"""Correctly returns the ancestry of an artifact in the middle of the
DAG"""
with qdb.sql_connection.TRN:
sql = "SELECT * FROM qiita.artifact_ancestry(%s)"
qdb.sql_connection.TRN.add(sql, [2])
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = [[2, 1]]
self.assertEqual(obs, exp)
def test_artifact_descendants_leaf(self):
"""Correctly returns the descendants of a leaf artifact"""
with qdb.sql_connection.TRN:
sql = "SELECT * FROM qiita.artifact_descendants(%s)"
qdb.sql_connection.TRN.add(sql, [4])
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = []
self.assertEqual(obs, exp)
def test_artifact_descendants_root(self):
"""Correctly returns the descendants of a root artifact"""
with qdb.sql_connection.TRN:
sql = "SELECT * FROM qiita.artifact_descendants(%s)"
qdb.sql_connection.TRN.add(sql, [1])
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = [[2, 1], [3, 1], [4, 2], [5, 2], [6, 2]]
self.assertCountEqual(obs, exp)
def test_artifact_descendants_middle(self):
"""Correctly returns the descendants of an artifact in the middle of
the DAG"""
with qdb.sql_connection.TRN:
sql = "SELECT * FROM qiita.artifact_descendants(%s)"
qdb.sql_connection.TRN.add(sql, [2])
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = [[4, 2], [5, 2], [6, 2]]
self.assertCountEqual(obs, exp)
def test_isnumeric(self):
    """Test SQL function isnumeric"""
    # (input string, expected isnumeric verdict) pairs, in the same order
    # as the VALUES list in the SQL below.
    cases = [('', False), ('.', False), ('.0', True), ('0.', True),
             ('0', True), ('1', True), ('123', True), ('123.456', True),
             ('abc', False), ('1..2', False), ('1.2.3.4', False),
             ('1x234', False), ('1.234e-5', True)]
    sql = ("WITH test(x) AS ("
           "VALUES (''), ('.'), ('.0'), ('0.'), ('0'), ('1'), ('123'), "
           "('123.456'), ('abc'), ('1..2'), ('1.2.3.4'), ('1x234'), "
           "('1.234e-5')) SELECT x, isnumeric(x) FROM test;")
    with qdb.sql_connection.TRN:
        qdb.sql_connection.TRN.add(sql)
        obs = qdb.sql_connection.TRN.execute_fetchindex()
    self.assertEqual([list(case) for case in cases], obs)
def test_artifact_descendants_with_jobs(self):
    """Test SQL function artifact_descendants_with_jobs"""
    exp = [['c350b068-add7-49a5-8846-604ac032cc88', 1, 2],
           ['d883dab4-503b-45c2-815d-2126ff52dede', 1, 3],
           ['a4c4b9b9-20ca-47f5-bd30-725cce71df2b', 2, 4],
           ['624dce65-43a5-4156-a4b6-6c1d02114b67', 2, 5],
           ['81bbe8d0-b4c2-42eb-ada9-f07c1c91e59f', 2, 6]]
    sql = """SELECT * FROM qiita.artifact_descendants_with_jobs(1)"""
    with qdb.sql_connection.TRN:
        qdb.sql_connection.TRN.add(sql)
        obs = qdb.sql_connection.TRN.execute_fetchindex()
    # zip() silently truncates to the shorter sequence, so first make sure
    # no expected (or unexpected extra) row would go unchecked.
    self.assertEqual(len(exp), len(obs))
    # looping on results to not test column 0: the job id is randomly generated
    for e, o in zip(exp, obs):
        self.assertEqual(e[1:], o[1:])
# Allow running this test module directly (e.g. ``python <module>.py``).
if __name__ == '__main__':
    main()
from melody.constraints.abstract_constraint import AbstractConstraint
from structure.note import Note
from tonalmodel.chromatic_scale import ChromaticScale
from tonalmodel.diatonic_pitch import DiatonicPitch
class ScalarPitchConstraint(AbstractConstraint):
"""
Class representing constraints to ensure some one note is scalar (to the contextual tonality), or within
some subset of tones of the tonality.
"""
def __init__(self, note, scalar_roles=None):
    """
    Constraint constructor.
    :param note: Actor affected by constraint.
    :param scalar_roles: Optional list of integer indices to tones in the current tonality, to which
                         the actor must conform. None/empty means any scalar tone is acceptable.
    """
    AbstractConstraint.__init__(self, [note])
    # The previous signature used a mutable default argument (scalar_roles=list()),
    # a Python anti-pattern; None as sentinel is equivalent here since the
    # argument is always copied defensively and never mutated.
    self.__scalar_notes = list(scalar_roles) if scalar_roles is not None else []
@property
def actor_note(self):
    """Return the single actor (note) this constraint applies to."""
    return self.actors[0]
@property
def scalar_roles(self):
    """Return the list of tonality-role indices the actor must match (empty means any scalar tone)."""
    return self.__scalar_notes
def clone(self, new_actors=None):
    """Clone this constraint onto new actors.

    NOTE(review): not implemented -- always returns None. Callers expecting
    AbstractConstraint.clone semantics get no copy; confirm this is intentional.
    """
    pass
def verify(self, parameter_map):
    """
    Check whether the actor's assigned note satisfies the constraint.
    :param parameter_map: Map from actor note to its contextual assignment.
    :return: True if the assigned tone is in the tonality's annotation and,
             when scalar_roles is non-empty, its annotation index is one of
             the allowed roles; False otherwise (including unassigned note).
    """
    contextual_note = parameter_map[self.actor_note]
    if contextual_note.note is None:
        return False
    tone = contextual_note.note.diatonic_pitch.diatonic_tone
    annotation = contextual_note.policy_context.harmonic_context.tonality.annotation
    if len(self.scalar_roles) == 0:
        return tone in annotation
    # Guard the index() call: the original code raised ValueError for a
    # non-scalar tone here instead of reporting a failed verification.
    if tone not in annotation:
        return False
    return annotation.index(tone) in self.scalar_roles
def values(self, p_map, v_note):
    """
    Compute the candidate notes for v_note that satisfy this constraint.
    :param p_map: Map from actor note to its contextual assignment.
    :param v_note: The note being solved for; must be this constraint's actor.
    :return: If v_note is already assigned, {actor} when the assignment
             conforms, else None; otherwise a set of candidate Note objects
             over the allowed tones within the policy pitch range.
    :raise Exception: if v_note is not this constraint's actor.
    """
    if v_note != self.actor_note:
        raise Exception('v_note {0} not in ScalarConstraint actors.'.format(v_note.note))
    policy_context = p_map[self.actor_note].policy_context
    tones = list(policy_context.harmonic_context.tonality.annotation)
    tones = tones[:-1]  # remove final note (same as first)
    if len(self.scalar_roles) != 0:
        # Restrict candidates to the requested tonality roles only.
        tones = [tones[i] for i in self.scalar_roles]
    if p_map[v_note].note is not None:
        # Already assigned: just report whether the assignment conforms.
        tone = p_map[v_note].note.diatonic_pitch.diatonic_tone
        return {self.actor_note} if tone in tones else None
    pitch_range = policy_context.pitch_range
    # Widen the scan by one partition on each side, then filter with
    # is_pitch_inbounds so boundary pitches are not missed.
    # (Assumes index_to_location()[0] is the octave/partition number -- TODO confirm.)
    start_partition = max(ChromaticScale.index_to_location(pitch_range.start_index)[0] - 1, 0)
    end_partition = min(ChromaticScale.index_to_location(pitch_range.end_index)[0] + 1,
                        ChromaticScale.CHROMATIC_END[0])
    valid_set = set()
    for tone in tones:
        for j in range(start_partition, end_partition + 1):
            pitch = DiatonicPitch(j, tone)
            if pitch_range.is_pitch_inbounds(pitch):
                # Candidates keep the actor's duration/dots; only pitch varies.
                note = Note(pitch, self.actor_note.base_duration, self.actor_note.num_dots)
                valid_set.add(note)
return valid_set | melody/constraints/scalar_pitch_constraint.py | from melody.constraints.abstract_constraint import AbstractConstraint
from structure.note import Note
from tonalmodel.chromatic_scale import ChromaticScale
from tonalmodel.diatonic_pitch import DiatonicPitch
class ScalarPitchConstraint(AbstractConstraint):
"""
Class representing constraints to ensure some one note is scalar (to the contextual tonality), or within
some subset of tones of the tonality.
"""
def __init__(self, note, scalar_roles=list()):
'''
Constraint constuctor.
:param note: Actor affected by constraint.
:param scalar_roles: List of integer indices to tones in the current tonality, to which the actor must conform.
'''
AbstractConstraint.__init__(self, [note])
self.__scalar_notes = list(scalar_roles)
@property
def actor_note(self):
return self.actors[0]
@property
def scalar_roles(self):
return self.__scalar_notes
def clone(self, new_actors=None):
pass
def verify(self, parameter_map):
contextual_note = parameter_map[self.actor_note]
if contextual_note.note is None:
return False
tone = contextual_note.note.diatonic_pitch.diatonic_tone
if len(self.scalar_roles) == 0:
return tone in contextual_note.policy_context.harmonic_context.tonality.annotation
else:
index = contextual_note.policy_context.harmonic_context.tonality.annotation.index(tone)
return index in self.scalar_roles
def values(self, p_map, v_note):
if v_note != self.actor_note:
raise Exception('v_note {0} not in ScalarConstraint actors.'.format(v_note.note))
policy_context = p_map[self.actor_note].policy_context
tones = list(policy_context.harmonic_context.tonality.annotation)
tones = tones[:-1] # remove final note (same as first)
if len(self.scalar_roles) != 0:
tones = [tones[i] for i in self.scalar_roles]
if p_map[v_note].note is not None:
tone = p_map[v_note].note.diatonic_pitch.diatonic_tone
return {self.actor_note} if tone in tones else None
pitch_range = policy_context.pitch_range
start_partition = max(ChromaticScale.index_to_location(pitch_range.start_index)[0] - 1, 0)
end_partition = min(ChromaticScale.index_to_location(pitch_range.end_index)[0] + 1,
ChromaticScale.CHROMATIC_END[0])
valid_set = set()
for tone in tones:
for j in range(start_partition, end_partition + 1):
pitch = DiatonicPitch(j, tone)
if pitch_range.is_pitch_inbounds(pitch):
note = Note(pitch, self.actor_note.base_duration, self.actor_note.num_dots)
valid_set.add(note)
return valid_set | 0.93488 | 0.459197 |
import argparse
import os
from os.path import abspath, dirname, join
import re
from typing import Tuple
import pandas as pd
from ncmagics import japanmap
def parse_args() -> dict:
    """Parse command-line options.

    Returns:
        dict: mapping with key "dir" -> the directory given via -d/--dir
              (None when the option was not supplied).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-d", "--dir", help="set directory which contained csv files.", type=str)
    parsed = arg_parser.parse_args()
    return {"dir": parsed.dir}
def _cat_path(dir_path: str, files: str) -> str:
"""_cat_path.
used in mkfile_list() to make abspath.
Args:
dir_path (str): dir_path
files (str): files
Returns:
str:
"""
abs_path = join(dir_path, files)
return abs_path
def mkfile_list(dirpath: str) -> list:
    """List the csv files under *dirpath*, sorted by name (datetime order).

    Any file whose name contains "initialdata" is excluded; the initial-data
    file is handled separately by the caller.
    Args:
        dirpath (str): directory to scan
    Returns:
        list: sorted list of joined paths
    """
    return sorted(
        join(dirpath, name)
        for name in os.listdir(dirpath)
        if "initialdata" not in name
    )
def read_csv(csvfile: str) -> Tuple[
        pd.Series, pd.Series, pd.Series]:
    """Load the cyclone-center track columns from *csvfile*.

    Args:
        csvfile (str): path to a csv with latitude / longitude /
            "prmsl center (hPa)" columns
    Returns:
        Tuple[pd.Series, pd.Series, pd.Series]:
            (latitude, longitude, central pressure)
    """
    track = pd.read_csv(csvfile, header=0)
    return track["latitude"], track["longitude"], track["prmsl center (hPa)"]
def mk_title_outname(dir_name: str) -> Tuple[str, str]:
    """Build the output-figure path and figure title from *dir_name*.

    Args:
        dir_name (str): data directory name, optionally containing a
            YYYYMMDD datetime substring
    Returns:
        Tuple[str, str]: (output path, figure title)
    """
    dir_name_has_datetime = re.search("[0-9]{8}", dir_name)
    dir_datetime = dir_name_has_datetime.group() if dir_name_has_datetime else ""
    # Let join() assemble the components. The original concatenated "/" by
    # hand and wrapped it in a single-argument join() (a no-op), producing a
    # double slash ("trackdatafig//compare_trackdata") when no datetime was
    # found; join() skips the empty component instead.
    outname = join(abspath(dirname(__file__)), "trackdatafig",
                   dir_datetime, "compare_trackdata")
    title = ("compare cyclone center trackdata" + dir_datetime)
    return outname, title
def main():
    """Plot cyclone center locations (initial data + forecasts) on a Japan map.

    Reads the csv files from the directory given on the command line and
    draws one track per forecast file, plus the model's initial data.
    """
    args = parse_args()
    dirpath = abspath(join(dirname(__file__), args["dir"]))
    # The original called mkfile_list(dirpath) twice and discarded the first
    # result; one call is enough.
    csv_list = mkfile_list(dirpath)
    plt_map = japanmap.JpMap(color=True)
    # plot model's initial data.
    label = "initial data"
    initialdata = join(abspath(dirname(__file__)) + "/" + args["dir"] + "/initialdata.csv")
    lat, lon, prmsl = read_csv(initialdata)
    plt_map.color_list.insert(0, '#696969')
    # NOTE(review): the initial data is plotted as (lat, lon) while the
    # forecast loop below plots (lon, lat); one of the two orderings is
    # almost certainly wrong -- confirm against japanmap.JpMap.plot_data.
    plt_map.plot_data(lat, lon, str(label))
    plt_map.plot_prmsl_circle(lat, lon, prmsl)
    # plot forecast data.
    for data in csv_list:
        # Only files with a YYYYMMDDHH timestamp in the name are plotted;
        # the timestamp doubles as the legend label.
        data_has_datetime = re.search("[0-9]{10}", data)
        if data_has_datetime:
            label = data_has_datetime.group()
        else:
            continue
        lat, lon, prmsl = read_csv(data)
        plt_map.plot_data(lon, lat, str(label))
        plt_map.plot_prmsl_circle(lon, lat, prmsl)
    # picture title and outname
    outname, title = mk_title_outname(args["dir"])
    plt_map.save_fig(outname, title)
if __name__ == "__main__":
main() | main/plotcyclone_loc_dir.py | import argparse
import os
from os.path import abspath, dirname, join
import re
from typing import Tuple
import pandas as pd
from ncmagics import japanmap
def parse_args() -> dict:
"""parse_args.
Set directory path from stdin.
Args:
Returns:
dict:
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-d", "--dir", help="set directory which contained csv files.", type=str)
p = parser.parse_args()
args = {"dir": p.dir}
return args
def _cat_path(dir_path: str, files: str) -> str:
"""_cat_path.
used in mkfile_list() to make abspath.
Args:
dir_path (str): dir_path
files (str): files
Returns:
str:
"""
abs_path = join(dir_path, files)
return abs_path
def mkfile_list(dirpath: str) -> list:
"""mkfile_list.
get csv list and sort by datetime order.
using _cat_path() to convert abspath
Args:
dirpath (str): dirpath
Returns:
list:
"""
csv_list = sorted([
_cat_path(dirpath, i)
for i in os.listdir(dirpath)
if "initialdata" not in i
])
return csv_list
def read_csv(csvfile: str) -> Tuple[
pd.Series, pd.Series, pd.Series]:
"""read_csv.
read cyclone center location from csvfile.
Args:
csvfile (str): csvfile
Returns:
Tuple[
pd.Series, pd.Series, pd.Series]:
"""
df = pd.read_csv(csvfile, header=0)
lat = df["latitude"]
lon = df["longitude"]
prmsl = df["prmsl center (hPa)"]
return lat, lon, prmsl
def mk_title_outname(dir_name: str) -> Tuple[str, str]:
"""mk_title_outname.
Args:
dir_name (str): dir_name
Returns:
Tuple[str, str]:
"""
dir_name_has_datetime = re.search("[0-9]{8}", dir_name)
if dir_name_has_datetime:
dir_datetime = dir_name_has_datetime.group()
else:
dir_datetime = ""
outname = join(abspath(dirname(__file__)) + "/trackdatafig/" +
dir_datetime + "/compare_trackdata")
title = ("compare cyclone center trackdata" + dir_datetime)
return outname, title
def main():
"""main
plot cyclone center location on near japan map.
"""
args = parse_args()
dirpath = abspath(join(dirname(__file__), args["dir"]))
mkfile_list(dirpath)
csv_list = mkfile_list(dirpath)
plt_map = japanmap.JpMap(color=True)
# plot model's initial data.
label = "initial data"
initialdata = join(abspath(dirname(__file__)) + "/" + args["dir"] + "/initialdata.csv")
lat, lon, prmsl = read_csv(initialdata)
plt_map.color_list.insert(0, '#696969')
plt_map.plot_data(lat, lon, str(label))
plt_map.plot_prmsl_circle(lat, lon, prmsl)
# plot forecast data.
for data in csv_list:
data_has_datetime = re.search("[0-9]{10}", data)
if data_has_datetime:
label = data_has_datetime.group()
else:
continue
lat, lon, prmsl = read_csv(data)
plt_map.plot_data(lon, lat, str(label))
plt_map.plot_prmsl_circle(lon, lat, prmsl)
# picture title and outname
outname, title = mk_title_outname(args["dir"])
plt_map.save_fig(outname, title)
if __name__ == "__main__":
main() | 0.549641 | 0.213142 |
import unittest
import logging
# project
from stackstate_checks.splunk.config import SplunkInstanceConfig, SplunkSavedSearch
from stackstate_checks.splunk.client import FinalizeException
from test_splunk_instance_config import MockedCommittableState, mock_defaults
from stackstate_checks.splunk.saved_search_helper import SavedSearches
from stackstate_checks.base import AgentCheck
from stackstate_checks.base.errors import CheckException
class MockSplunkClient(object):
    """In-memory stand-in for the Splunk client used by SavedSearches.

    Tests pre-populate the lookup dicts below; a running counter lets
    dispatch() assert the parallel-search cap is never exceeded.
    """
    def __init__(self):
        self.saved_searches_result = []          # names returned by saved_searches()
        self.saved_search_results_results = {}   # sid -> list of canned result payloads
        self.dispatch_results = {}               # saved-search name -> sid
        self.finalized = []                      # sids passed to finalize_sid()
        self.max_parallel_searches = 999         # cap asserted in dispatch()
        self.parallel_searches = 0               # searches currently "running"

    def saved_searches(self):
        """Return the configured list of saved-search names."""
        return self.saved_searches_result

    def saved_search_results(self, search_id, saved_search):
        """Return the canned results for *search_id*, releasing one slot."""
        self.parallel_searches -= 1
        return self.saved_search_results_results[search_id]

    def dispatch(self, saved_search, splunk_app, ignore_saved_search_errors, parameters):
        """Return the canned sid for *saved_search*, taking one slot."""
        self.parallel_searches += 1
        assert self.parallel_searches <= self.max_parallel_searches
        return self.dispatch_results[saved_search.name]

    def finalize_sid(self, search_id, saved_search):
        """Record that *search_id* was finalized."""
        self.finalized.append(search_id)
class MockSplunkClientFailFinalize(MockSplunkClient):
    """MockSplunkClient variant whose finalize_sid always fails."""
    def __init__(self, *args, **kwargs):
        super(MockSplunkClientFailFinalize, self).__init__(*args, **kwargs)

    def finalize_sid(self, search_id, saved_search):
        """Record the sid, then fail as a broken Splunk finalize would."""
        self.finalized.append(search_id)
        raise FinalizeException(0, "Broken")
# Minimal shared instance configuration used by most tests below.
# (The "<PASSWORD>" placeholder is intentional test data, not a real secret.)
base_instance = {
    'url': 'http://localhost:8089',
    'authentication': {
        'basic_auth': {
            'username': "adminNew",
            'password': "<PASSWORD>"
        }
    },
    'tags': ['mytag', 'mytag2']
}
class MockServiceCheck(object):
    """Records service-check calls; the callable is exposed as ``function``."""
    def __init__(self):
        self.results = []   # recorded [status, tags, hostname, message] entries
        self.ret_val = 0

        def _service_check(status, tags=None, hostname=None, message=None):
            # Closure over self: capture every reported service check.
            self.results.append([status, tags, hostname, message])

        self.function = _service_check
class MockProcessData(object):
    """Records process-data calls; the callable is exposed as ``function``."""
    def __init__(self):
        self.results = []   # every response passed to the callback
        self.ret_val = 0    # value the callback returns (incomplete-record count)

        def _process_data(saved_search, response):
            # Closure over self: capture the response, return the canned value.
            self.results.append(response)
            return self.ret_val

        self.function = _process_data
class TestSplunkInstanceConfig(unittest.TestCase):
def setUp(self):
    """Create fresh mocks and an empty committable state before each test."""
    self.log = logging.getLogger('%s' % __name__)
    self.committable_state = MockedCommittableState({})
    self.mock_service_check = MockServiceCheck()
    self.mock_process_data = MockProcessData()
    self.mock_splunk_client = MockSplunkClient()
def test_no_saved_searches(self):
    """Without saved searches nothing is processed and OK is reported."""
    config = SplunkInstanceConfig(base_instance, {}, mock_defaults)
    searches = SavedSearches(config, self.mock_splunk_client, [])
    searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                self.committable_state)
    assert self.mock_process_data.results == []
    assert self.mock_service_check.results == [[AgentCheck.OK, None, None, None]]
def test_process_data(self):
    """Results of each dispatched saved search are fed to process_data in order."""
    instance = SplunkInstanceConfig(base_instance, {}, mock_defaults)
    saved = [SplunkSavedSearch(instance, {'name': 'search1'}),
             SplunkSavedSearch(instance, {'name': 'search2'})]
    searches = SavedSearches(instance, self.mock_splunk_client, saved)
    self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
    self.mock_splunk_client.dispatch_results['search2'] = 'sid2'
    data1 = {'messages': [], 'results': [{'data': 'result1'}]}
    data2 = {'messages': [], 'results': [{'data': 'result2'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
    self.mock_splunk_client.saved_search_results_results['sid2'] = [data2]
    searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                self.committable_state)
    assert self.mock_process_data.results == [data1, data2]
    assert self.mock_service_check.results == [[AgentCheck.OK, None, None, None]]
def test_store_and_finalize_sids(self):
    """A sid is persisted when result retrieval fails, then finalized on rerun."""
    instance = SplunkInstanceConfig(base_instance, {}, mock_defaults)
    searches = SavedSearches(instance, self.mock_splunk_client, [SplunkSavedSearch(instance, {'name': 'breaking'})])
    # Dispatch should succeed, but not the result retrieval
    self.mock_splunk_client.dispatch_results['breaking'] = 'sid1'
    # Idiomatic replacement for the original try/except/flag dance: the run
    # must raise because no results exist for 'sid1' yet.
    with self.assertRaises(Exception):
        searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                    self.committable_state)
    assert self.mock_service_check.results == []
    assert self.committable_state.state == {'sid_breaking': 'sid1'}
    # Make sure it now runs correctly
    data1 = {'messages': [], 'results': [{'data': 'result1'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
    # Run again, this finalizes and reruns the search
    searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                self.committable_state)
    assert self.mock_splunk_client.finalized == ['sid1']
    assert self.mock_service_check.results == [[AgentCheck.OK, None, None, None]]
    assert self.committable_state.state == {'sid_breaking': 'sid1'}
def test_keep_sid_when_finalize_fails(self):
    """The stored sid must survive when finalizing it raises FinalizeException."""
    instance = SplunkInstanceConfig(base_instance, {}, mock_defaults)
    mock_splunk_client = MockSplunkClientFailFinalize()
    searches = SavedSearches(instance, mock_splunk_client, [SplunkSavedSearch(instance, {'name': 'breaking'})])
    # Dispatch should succeed, but not the result retrieval
    mock_splunk_client.dispatch_results['breaking'] = 'sid1'
    issue = None
    try:
        searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                    self.committable_state)
        issue = False
    except Exception:
        issue = True
    assert issue
    assert self.mock_service_check.results == []
    # The unfinished sid stays in the committable state for the next run.
    assert self.committable_state.state == {'sid_breaking': 'sid1'}
    # Run again, this will trigger an issue during finalize
    issue2 = None
    try:
        searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                    self.committable_state)
        issue2 = False
    except FinalizeException:
        issue2 = True
    assert issue2
    assert mock_splunk_client.finalized == ['sid1']
    assert self.mock_service_check.results == []
    # Finalize failed, so the sid is still kept for a later retry.
    assert self.committable_state.state == {'sid_breaking': 'sid1'}
def test_incomplete_data(self):
    """A fully-incomplete search raises CheckException after the first search;
    the second search is never processed and no service check is sent."""
    instance = SplunkInstanceConfig(base_instance, {}, mock_defaults)
    searches = SavedSearches(instance, self.mock_splunk_client, [SplunkSavedSearch(instance, {'name': 'search1'}),
                                                                 SplunkSavedSearch(instance, {'name': 'search2'})])
    self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
    self.mock_splunk_client.dispatch_results['search2'] = 'sid2'
    data1 = {'messages': [], 'results': [{'data': 'result1'}]}
    data2 = {'messages': [], 'results': [{'data': 'result2'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
    self.mock_splunk_client.saved_search_results_results['sid2'] = [data2]
    # process_data reports one incomplete record for every response, so every
    # record of search1 (1 of 1) is incomplete.
    self.mock_process_data.ret_val = 1
    # Idiomatic replacement for the original try/except/flag dance.
    with self.assertRaises(CheckException):
        searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                    self.committable_state)
    assert self.mock_process_data.results == [data1]
    assert self.mock_service_check.results == []
def test_partially_incomplete_data(self):
    """A search with only some incomplete records yields a WARNING, not a failure."""
    instance = SplunkInstanceConfig(base_instance, {}, mock_defaults)
    searches = SavedSearches(instance, self.mock_splunk_client, [SplunkSavedSearch(instance, {'name': 'search1'})])
    self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
    self.mock_splunk_client.dispatch_results['search2'] = 'sid2'
    response = {'messages': [], 'results': [{'data': 'result1'}, {'data': 'result2'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [response]
    # One of the two records is reported incomplete.
    self.mock_process_data.ret_val = 1
    searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                self.committable_state)
    assert self.mock_process_data.results == [response]
    assert self.mock_service_check.results == [[AgentCheck.WARNING, ['mytag', 'mytag2'], None,
                                                "The saved search 'search1' contained 1 incomplete records"]]
def test_partially_incomplete_and_incomplete(self):
    """Partially-incomplete search1 warns; fully-incomplete search2 raises."""
    instance = SplunkInstanceConfig(base_instance, {}, mock_defaults)
    searches = SavedSearches(instance, self.mock_splunk_client, [SplunkSavedSearch(instance, {'name': 'search1'}),
                                                                 SplunkSavedSearch(instance, {'name': 'search2'})])
    self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
    self.mock_splunk_client.dispatch_results['search2'] = 'sid2'
    data1 = {'messages': [], 'results': [{'data': 'result1'}, {'data': 'result1_2'}]}
    data2 = {'messages': [], 'results': [{'data': 'result2'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
    self.mock_splunk_client.saved_search_results_results['sid2'] = [data2]
    # Every call to process_data reports one incomplete record.
    self.mock_process_data.ret_val = 1
    issue = None
    try:
        searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                    self.committable_state)
        issue = False
    except CheckException:
        issue = True
    # search2 (1 of 1 records incomplete) aborts the run with CheckException.
    assert issue
    assert self.mock_process_data.results == [data1, data2]
    # search1 (1 of 2 records incomplete) only produced a warning.
    assert self.mock_service_check.results == [[AgentCheck.WARNING, ['mytag', 'mytag2'], None,
                                                "The saved search 'search1' contained 1 incomplete records"]]
def test_wildcard_topology(self):
    """Saved searches selected via 'match' wildcards track the server's list."""
    instance = SplunkInstanceConfig(base_instance, {}, mock_defaults)
    searches = SavedSearches(instance, self.mock_splunk_client, [SplunkSavedSearch(instance, {'match': 'comp.*'}),
                                                                 SplunkSavedSearch(instance, {'match': 'rel.*'})])
    # The server advertises two searches matching the wildcards above.
    self.mock_splunk_client.saved_searches_result = ["components", "relations"]
    self.mock_splunk_client.dispatch_results['components'] = 'sid1'
    self.mock_splunk_client.dispatch_results['relations'] = 'sid2'
    data1 = {'messages': [], 'results': [{'data': 'result1'}]}
    data2 = {'messages': [], 'results': [{'data': 'result2'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
    self.mock_splunk_client.saved_search_results_results['sid2'] = [data2]
    searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                self.committable_state)
    assert len(self.mock_process_data.results) == 2
    assert self.mock_service_check.results == [[AgentCheck.OK, None, None, None]]
    # Now drop all saved searches and see them disappear
    self.mock_splunk_client.saved_searches_result = []
    self.mock_process_data = MockProcessData()
    self.mock_service_check = MockServiceCheck()
    searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                self.committable_state)
    assert self.mock_process_data.results == []
    assert self.mock_service_check.results == [[AgentCheck.OK, None, None, None]]
def test_does_not_exceed_parallel_dispatches(self):
    """Dispatching honors the saved_searches_parallel cap (asserted inside the mock)."""
    saved_searches_parallel = 2
    instance = {
        'url': 'http://localhost:8089',
        'saved_searches_parallel': saved_searches_parallel,
        'authentication': {
            'basic_auth': {
                'username': "adminNew",
                'password': "<PASSWORD>"
            }
        },
        'tags': ['mytag', 'mytag2']
    }
    instance = SplunkInstanceConfig(instance, {}, mock_defaults)
    names = ['search1', 'search2', 'search3', 'search4']
    searches = SavedSearches(instance, self.mock_splunk_client,
                             [SplunkSavedSearch(instance, {'name': name}) for name in names])
    # The mock asserts on every dispatch that the cap is never exceeded.
    self.mock_splunk_client.max_parallel_searches = saved_searches_parallel
    for idx, name in enumerate(names, start=1):
        sid = 'sid%d' % idx
        self.mock_splunk_client.dispatch_results[name] = sid
        self.mock_splunk_client.saved_search_results_results[sid] = []
    searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                self.committable_state)
    assert self.mock_process_data.results == []
    assert self.mock_service_check.results == [[AgentCheck.OK, None, None, None]]
def test_ignore_saved_search_errors_continue(self):
    """
    When 1 saved search fails with Check Exception, the code should continue and send
    topology if issues are ignored.
    """
    instance = {
        'url': 'http://localhost:8089',
        'ignore_saved_search_errors': True,
        'authentication': {
            'basic_auth': {
                'username': "adminNew",
                'password': "<PASSWORD>"
            }
        },
        'tags': ['mytag', 'mytag2']
    }
    instance = SplunkInstanceConfig(instance, {}, mock_defaults)
    searches = SavedSearches(instance, self.mock_splunk_client,
                             [SplunkSavedSearch(instance, {'name': 'search_broken'}),
                              SplunkSavedSearch(instance, {'name': 'search1'})])
    # 'sid_broken' gets no canned results, so retrieving its results fails.
    self.mock_splunk_client.dispatch_results['search_broken'] = 'sid_broken'
    self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
    data1 = {'messages': [], 'results': [{'data': 'result1'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
    searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                self.committable_state)
    # The failure surfaces only as a WARNING; the healthy search was processed.
    assert self.mock_process_data.results == [data1]
    assert self.mock_service_check.results == [[AgentCheck.WARNING, ['mytag', 'mytag2'], None, "'sid_broken'"]]
def test_no_ignore_saved_search_errors_breaks(self):
    """
    When 1 saved search fails and errors are NOT ignored, the run must abort:
    nothing is processed and no service check is emitted.
    (The previous docstring was copy-pasted from the ignore-errors test and
    described the opposite behavior.)
    """
    instance = {
        'url': 'http://localhost:8089',
        'ignore_saved_search_errors': False,
        'authentication': {
            'basic_auth': {
                'username': "adminNew",
                'password': "<PASSWORD>"
            }
        },
        'tags': ['mytag', 'mytag2']
    }
    instance = SplunkInstanceConfig(instance, {}, mock_defaults)
    searches = SavedSearches(instance, self.mock_splunk_client,
                             [SplunkSavedSearch(instance, {'name': 'search_broken'}),
                              SplunkSavedSearch(instance, {'name': 'search1'})])
    # 'sid_broken' gets no canned results, so retrieving its results fails.
    self.mock_splunk_client.dispatch_results['search_broken'] = 'sid_broken'
    self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
    data1 = {'messages': [], 'results': [{'data': 'result1'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
    # Idiomatic replacement for the original try/except/flag dance.
    with self.assertRaises(Exception):
        searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                    self.committable_state)
    assert self.mock_process_data.results == []
    assert self.mock_service_check.results == []
def test_incomplete_and_failing_produce_warnings(self):
"""
When both saved search fails with Check Exception, the code should continue and send topology.
"""
instance = {
'url': 'http://localhost:8089',
'authentication': {
'basic_auth': {
'username': "admin",
'password': "<PASSWORD>"
}
},
'ignore_saved_search_errors': True,
}
instance = SplunkInstanceConfig(instance, {}, mock_defaults)
searches = SavedSearches(instance, self.mock_splunk_client, [SplunkSavedSearch(instance, {'name': 'search1'}),
SplunkSavedSearch(instance,
{'name': 'search_broken'})])
self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
self.mock_splunk_client.dispatch_results['search_broken'] = 'broken_sid'
data1 = {'messages': [], 'results': [{'data': 'result1'}]}
self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
self.mock_process_data.ret_val = 1
searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
self.committable_state)
assert self.mock_service_check.results == [
[AgentCheck.WARNING, [], None,
"All result of saved search 'search1' contained incomplete data"],
[AgentCheck.WARNING, [], None, "'broken_sid'"]] | splunk_base/tests/test_saved_search_helper.py | import unittest
import logging
# project
from stackstate_checks.splunk.config import SplunkInstanceConfig, SplunkSavedSearch
from stackstate_checks.splunk.client import FinalizeException
from test_splunk_instance_config import MockedCommittableState, mock_defaults
from stackstate_checks.splunk.saved_search_helper import SavedSearches
from stackstate_checks.base import AgentCheck
from stackstate_checks.base.errors import CheckException
class MockSplunkClient(object):
def __init__(self):
self.saved_searches_result = []
self.saved_search_results_results = {}
self.dispatch_results = {}
self.finalized = []
self.max_parallel_searches = 999
self.parallel_searches = 0
def saved_searches(self):
return self.saved_searches_result
def saved_search_results(self, search_id, saved_search):
self.parallel_searches -= 1
return self.saved_search_results_results[search_id]
def dispatch(self, saved_search, splunk_app, ignore_saved_search_errors, parameters):
self.parallel_searches += 1
assert self.parallel_searches <= self.max_parallel_searches
return self.dispatch_results[saved_search.name]
def finalize_sid(self, search_id, saved_search):
self.finalized.append(search_id)
return
class MockSplunkClientFailFinalize(MockSplunkClient):
def __init__(self, *args, **kwargs):
super(MockSplunkClientFailFinalize, self).__init__(*args, **kwargs)
def finalize_sid(self, search_id, saved_search):
self.finalized.append(search_id)
raise FinalizeException(0, "Broken")
base_instance = {
'url': 'http://localhost:8089',
'authentication': {
'basic_auth': {
'username': "adminNew",
'password': "<PASSWORD>"
}
},
'tags': ['mytag', 'mytag2']
}
class MockServiceCheck(object):
def __init__(self):
self.results = []
self.ret_val = 0
def _service_check(status, tags=None, hostname=None, message=None):
self.results.append([status, tags, hostname, message])
self.function = _service_check
class MockProcessData(object):
def __init__(self):
self.results = []
self.ret_val = 0
def _process_data(saved_search, response):
self.results.append(response)
return self.ret_val
self.function = _process_data
class TestSplunkInstanceConfig(unittest.TestCase):
def setUp(self):
self.log = logging.getLogger('%s' % __name__)
self.committable_state = MockedCommittableState({})
self.mock_service_check = MockServiceCheck()
self.mock_process_data = MockProcessData()
self.mock_splunk_client = MockSplunkClient()
def test_no_saved_searches(self):
searches = SavedSearches(SplunkInstanceConfig(base_instance, {}, mock_defaults), self.mock_splunk_client, [])
searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
self.committable_state)
assert self.mock_process_data.results == []
assert self.mock_service_check.results == [[AgentCheck.OK, None, None, None]]
def test_process_data(self):
instance = SplunkInstanceConfig(base_instance, {}, mock_defaults)
searches = SavedSearches(instance, self.mock_splunk_client, [SplunkSavedSearch(instance, {'name': 'search1'}),
SplunkSavedSearch(instance, {'name': 'search2'})])
self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
self.mock_splunk_client.dispatch_results['search2'] = 'sid2'
data1 = {'messages': [], 'results': [{'data': 'result1'}]}
data2 = {'messages': [], 'results': [{'data': 'result2'}]}
self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
self.mock_splunk_client.saved_search_results_results['sid2'] = [data2]
searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
self.committable_state)
assert self.mock_process_data.results == [data1, data2]
assert self.mock_service_check.results == [[AgentCheck.OK, None, None, None]]
def test_store_and_finalize_sids(self):
instance = SplunkInstanceConfig(base_instance, {}, mock_defaults)
searches = SavedSearches(instance, self.mock_splunk_client, [SplunkSavedSearch(instance, {'name': 'breaking'})])
# Dispatch should succeed, but not the result retrieval
self.mock_splunk_client.dispatch_results['breaking'] = 'sid1'
issue = True
try:
searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
self.committable_state)
issue = False
except Exception:
issue = True
assert issue
assert self.mock_service_check.results == []
assert self.committable_state.state == {'sid_breaking': 'sid1'}
# Make sure it now runs correctly
data1 = {'messages': [], 'results': [{'data': 'result1'}]}
self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
# Run again, this finalizes and reruns the search
searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
self.committable_state)
assert self.mock_splunk_client.finalized == ['sid1']
assert self.mock_service_check.results == [[AgentCheck.OK, None, None, None]]
assert self.committable_state.state == {'sid_breaking': 'sid1'}
def test_keep_sid_when_finalize_fails(self):
    """The committed sid is kept when both result retrieval and the later finalize fail."""
    cfg = SplunkInstanceConfig(base_instance, {}, mock_defaults)
    failing_client = MockSplunkClientFailFinalize()
    saved_searches = SavedSearches(cfg, failing_client,
                                   [SplunkSavedSearch(cfg, {'name': 'breaking'})])
    # Dispatch succeeds, but there are no results registered, so retrieval fails.
    failing_client.dispatch_results['breaking'] = 'sid1'
    first_raised = False
    try:
        saved_searches.run_saved_searches(self.mock_process_data.function,
                                          self.mock_service_check.function,
                                          self.log, self.committable_state)
    except Exception:
        first_raised = True
    assert first_raised
    assert self.mock_service_check.results == []
    assert self.committable_state.state == {'sid_breaking': 'sid1'}
    # The retry attempts to finalize the stored sid, which this client rejects.
    second_raised = False
    try:
        saved_searches.run_saved_searches(self.mock_process_data.function,
                                          self.mock_service_check.function,
                                          self.log, self.committable_state)
    except FinalizeException:
        second_raised = True
    assert second_raised
    assert failing_client.finalized == ['sid1']
    assert self.mock_service_check.results == []
    # The sid stays committed so finalization can be retried on a later run.
    assert self.committable_state.state == {'sid_breaking': 'sid1'}
def test_incomplete_data(self):
    """A saved search whose records are all incomplete aborts the run with CheckException."""
    cfg = SplunkInstanceConfig(base_instance, {}, mock_defaults)
    saved_searches = SavedSearches(cfg, self.mock_splunk_client,
                                   [SplunkSavedSearch(cfg, {'name': 'search1'}),
                                    SplunkSavedSearch(cfg, {'name': 'search2'})])
    self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
    self.mock_splunk_client.dispatch_results['search2'] = 'sid2'
    first_batch = {'messages': [], 'results': [{'data': 'result1'}]}
    second_batch = {'messages': [], 'results': [{'data': 'result2'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [first_batch]
    self.mock_splunk_client.saved_search_results_results['sid2'] = [second_batch]
    # Make the process-data mock report one incomplete record per batch.
    self.mock_process_data.ret_val = 1
    raised = False
    try:
        saved_searches.run_saved_searches(self.mock_process_data.function,
                                          self.mock_service_check.function,
                                          self.log, self.committable_state)
    except CheckException:
        raised = True
    assert raised
    # Only the first (incomplete) search was handed to process_data.
    assert self.mock_process_data.results == [first_batch]
    assert self.mock_service_check.results == []
def test_partially_incomplete_data(self):
    """A search whose results are only partly incomplete yields a WARNING but still completes."""
    instance = SplunkInstanceConfig(base_instance, {}, mock_defaults)
    searches = SavedSearches(instance, self.mock_splunk_client, [SplunkSavedSearch(instance, {'name': 'search1'})])
    self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
    self.mock_splunk_client.dispatch_results['search2'] = 'sid2'
    data1 = {'messages': [], 'results': [{'data': 'result1'}, {'data': 'result2'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
    # presumably the number of incomplete records the mock reports per batch -- see MockProcessData
    self.mock_process_data.ret_val = 1
    searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                self.committable_state)
    assert self.mock_process_data.results == [data1]
    # One of the two records was incomplete -> WARNING, but no exception is raised.
    assert self.mock_service_check.results == [[AgentCheck.WARNING, ['mytag', 'mytag2'], None,
                                                "The saved search 'search1' contained 1 incomplete records"]]
def test_partially_incomplete_and_incomplete(self):
    """A partially-incomplete search warns, while a fully-incomplete one raises CheckException."""
    instance = SplunkInstanceConfig(base_instance, {}, mock_defaults)
    searches = SavedSearches(instance, self.mock_splunk_client, [SplunkSavedSearch(instance, {'name': 'search1'}),
                                                                 SplunkSavedSearch(instance, {'name': 'search2'})])
    self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
    self.mock_splunk_client.dispatch_results['search2'] = 'sid2'
    # search1 has two records (one will count as incomplete); search2 has only one.
    data1 = {'messages': [], 'results': [{'data': 'result1'}, {'data': 'result1_2'}]}
    data2 = {'messages': [], 'results': [{'data': 'result2'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
    self.mock_splunk_client.saved_search_results_results['sid2'] = [data2]
    # presumably the number of incomplete records the mock reports per batch -- see MockProcessData
    self.mock_process_data.ret_val = 1
    issue = None
    try:
        searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                    self.committable_state)
        issue = False
    except CheckException:
        issue = True
    assert issue
    # Both searches were processed before the failure surfaced.
    assert self.mock_process_data.results == [data1, data2]
    assert self.mock_service_check.results == [[AgentCheck.WARNING, ['mytag', 'mytag2'], None,
                                                "The saved search 'search1' contained 1 incomplete records"]]
def test_wildcard_topology(self):
    """Searches selected via 'match' wildcards track the server's saved-search list."""
    instance = SplunkInstanceConfig(base_instance, {}, mock_defaults)
    searches = SavedSearches(instance, self.mock_splunk_client, [SplunkSavedSearch(instance, {'match': 'comp.*'}),
                                                                 SplunkSavedSearch(instance, {'match': 'rel.*'})])
    # The mock server advertises two saved searches matching the wildcard patterns.
    self.mock_splunk_client.saved_searches_result = ["components", "relations"]
    self.mock_splunk_client.dispatch_results['components'] = 'sid1'
    self.mock_splunk_client.dispatch_results['relations'] = 'sid2'
    data1 = {'messages': [], 'results': [{'data': 'result1'}]}
    data2 = {'messages': [], 'results': [{'data': 'result2'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
    self.mock_splunk_client.saved_search_results_results['sid2'] = [data2]
    searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                self.committable_state)
    assert len(self.mock_process_data.results) == 2
    assert self.mock_service_check.results == [[AgentCheck.OK, None, None, None]]
    # Now drop all saved searches on the server and verify nothing is processed.
    self.mock_splunk_client.saved_searches_result = []
    self.mock_process_data = MockProcessData()
    self.mock_service_check = MockServiceCheck()
    searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                self.committable_state)
    assert self.mock_process_data.results == []
    assert self.mock_service_check.results == [[AgentCheck.OK, None, None, None]]
def test_does_not_exceed_parallel_dispatches(self):
    """Dispatching four searches with saved_searches_parallel=2 must respect the limit."""
    saved_searches_parallel = 2
    instance = {
        'url': 'http://localhost:8089',
        'saved_searches_parallel': saved_searches_parallel,
        'authentication': {
            'basic_auth': {
                'username': "adminNew",
                'password': "<PASSWORD>"
            }
        },
        'tags': ['mytag', 'mytag2']
    }
    instance = SplunkInstanceConfig(instance, {}, mock_defaults)
    searches = SavedSearches(instance, self.mock_splunk_client, [SplunkSavedSearch(instance, {'name': 'search1'}),
                                                                 SplunkSavedSearch(instance, {'name': 'search2'}),
                                                                 SplunkSavedSearch(instance, {'name': 'search3'}),
                                                                 SplunkSavedSearch(instance, {'name': 'search4'})
                                                                 ])
    # Tell the mock client the allowed parallelism; presumably it verifies the
    # limit while dispatches are in flight -- see MockSplunkClient.
    self.mock_splunk_client.max_parallel_searches = saved_searches_parallel
    self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
    self.mock_splunk_client.dispatch_results['search2'] = 'sid2'
    self.mock_splunk_client.dispatch_results['search3'] = 'sid3'
    self.mock_splunk_client.dispatch_results['search4'] = 'sid4'
    # All four searches yield empty result sets; this test only cares about dispatching.
    self.mock_splunk_client.saved_search_results_results['sid1'] = []
    self.mock_splunk_client.saved_search_results_results['sid2'] = []
    self.mock_splunk_client.saved_search_results_results['sid3'] = []
    self.mock_splunk_client.saved_search_results_results['sid4'] = []
    searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                self.committable_state)
    assert self.mock_process_data.results == []
    assert self.mock_service_check.results == [[AgentCheck.OK, None, None, None]]
def test_ignore_saved_search_errors_continue(self):
    """
    When one saved search fails and ``ignore_saved_search_errors`` is True,
    the remaining searches still run and the failure surfaces as a WARNING
    service check instead of an exception.
    """
    instance = {
        'url': 'http://localhost:8089',
        'ignore_saved_search_errors': True,
        'authentication': {
            'basic_auth': {
                'username': "adminNew",
                'password': "<PASSWORD>"
            }
        },
        'tags': ['mytag', 'mytag2']
    }
    instance = SplunkInstanceConfig(instance, {}, mock_defaults)
    searches = SavedSearches(instance, self.mock_splunk_client,
                             [SplunkSavedSearch(instance, {'name': 'search_broken'}),
                              SplunkSavedSearch(instance, {'name': 'search1'})])
    # 'search_broken' gets a sid but no results are registered for it, so its
    # retrieval is expected to fail.
    self.mock_splunk_client.dispatch_results['search_broken'] = 'sid_broken'
    self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
    data1 = {'messages': [], 'results': [{'data': 'result1'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
    searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                self.committable_state)
    # The healthy search was still processed; the broken one produced a warning.
    assert self.mock_process_data.results == [data1]
    assert self.mock_service_check.results == [[AgentCheck.WARNING, ['mytag', 'mytag2'], None, "'sid_broken'"]]
def test_no_ignore_saved_search_errors_breaks(self):
    """
    When a saved search fails and ``ignore_saved_search_errors`` is False,
    the whole run raises and neither data nor service checks are produced.
    (The previous docstring was copy-pasted from the 'ignore' test.)
    """
    instance = {
        'url': 'http://localhost:8089',
        'ignore_saved_search_errors': False,
        'authentication': {
            'basic_auth': {
                'username': "adminNew",
                'password': "<PASSWORD>"
            }
        },
        'tags': ['mytag', 'mytag2']
    }
    instance = SplunkInstanceConfig(instance, {}, mock_defaults)
    searches = SavedSearches(instance, self.mock_splunk_client,
                             [SplunkSavedSearch(instance, {'name': 'search_broken'}),
                              SplunkSavedSearch(instance, {'name': 'search1'})])
    # 'search_broken' gets a sid but no results are registered for it, so its
    # retrieval is expected to fail.
    self.mock_splunk_client.dispatch_results['search_broken'] = 'sid_broken'
    self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
    data1 = {'messages': [], 'results': [{'data': 'result1'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
    issue = True
    try:
        searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                    self.committable_state)
        issue = False
    except Exception:
        issue = True
    assert issue
    # The failure aborts the run before any search result is processed.
    assert self.mock_process_data.results == []
    assert self.mock_service_check.results == []
def test_incomplete_and_failing_produce_warnings(self):
    """
    When one saved search returns only incomplete data and another fails
    outright, both produce WARNING service checks and the run continues,
    because ``ignore_saved_search_errors`` is enabled.
    """
    instance = {
        'url': 'http://localhost:8089',
        'authentication': {
            'basic_auth': {
                'username': "admin",
                'password': "<PASSWORD>"
            }
        },
        'ignore_saved_search_errors': True,
    }
    instance = SplunkInstanceConfig(instance, {}, mock_defaults)
    searches = SavedSearches(instance, self.mock_splunk_client,
                             [SplunkSavedSearch(instance, {'name': 'search1'}),
                              SplunkSavedSearch(instance, {'name': 'search_broken'})])
    # 'search_broken' gets a sid but has no registered results, so its retrieval fails.
    self.mock_splunk_client.dispatch_results['search1'] = 'sid1'
    self.mock_splunk_client.dispatch_results['search_broken'] = 'broken_sid'
    data1 = {'messages': [], 'results': [{'data': 'result1'}]}
    self.mock_splunk_client.saved_search_results_results['sid1'] = [data1]
    # Every record of 'search1' is reported incomplete by the process-data mock.
    self.mock_process_data.ret_val = 1
    searches.run_saved_searches(self.mock_process_data.function, self.mock_service_check.function, self.log,
                                self.committable_state)
    # Fixed: removed dataset-extraction junk ("| 0.603114 | 0.219526") that was
    # fused onto the final line of this method.
    assert self.mock_service_check.results == [
        [AgentCheck.WARNING, [], None,
         "All result of saved search 'search1' contained incomplete data"],
        [AgentCheck.WARNING, [], None, "'broken_sid'"]]
from shutil import copytree, move
from tempfile import TemporaryDirectory
from pathlib import Path
from unittest import skip
from capanno_utils.repo_config import tools_dir_name
from tests.test_base import TestBase
from capanno_utils.add_content import main as add_content_main
from capanno_utils.helpers.get_paths import *
# @skip('')
class TestAddToolMain(TestBase):
    """Tests for ``add_content_main`` adding tools, subtools, and tool instances.

    Every test works inside a TemporaryDirectory seeded with an empty tools
    index, so nothing is written to the real repository content.
    """

    # @skip('')
    def test_add_tool(self):
        """Add a tool version that only has a primary (main) subtool."""
        with TemporaryDirectory(prefix='test_add_tool_') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            tool_name = 'test_1'
            tool_version = 'fake.1'
            add_content_main(['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version, "--has-primary"])
            return

    # @skip('')
    def test_add_tool_with_subtools(self):
        """Add a tool version with several named subtools."""
        with TemporaryDirectory(prefix='test_add_w_subtools_') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            tool_name = 'test_2'
            tool_version = 'fake.2'
            subtools = ['subtool1', 'subtool2', 'subtool3']
            add_content_main(['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version] + subtools)
            return

    # @skip('')
    def test_add_tool_with_biotools_id(self):
        """Add a tool with subtools and an associated bio.tools identifier."""
        with TemporaryDirectory(prefix='with_biotools_') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            tool_name = 'test_biotools'
            tool_version = 'fake.3'
            biotools_id = 'malvirus'
            subtools = ['subtool1', 'subtool2', 'subtool3']
            options = ['--biotoolsID', biotools_id]
            add_content_main(['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version] + subtools + options)
            return

    def test_add_tool_with_subtools_with_init_urls(self):
        """Initialize two of the subtools' CWL files from a remote URL."""
        cwl_url = 'https://raw.githubusercontent.com/common-workflow-library/bio-cwl-tools/release/bandage/bandage-image.cwl'
        tool_name = 'test_biotools'
        tool_version = 'fake.3'
        biotools_id = 'malvirus'
        subtools = ['subtool1', 'subtool2', 'subtool3']
        options = ['--biotoolsID', biotools_id]
        init_cwl = ['--init-cwl', f"subtool1={cwl_url}", f"subtool2={cwl_url}"]
        with TemporaryDirectory(prefix='add_with_url') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            add_content_main(
                ['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version] + subtools + options + init_cwl)

    def test_add_tool_with_subtools_with_init_url_main_only(self):
        """Initialize only the primary subtool's CWL from a remote URL."""
        cwl_url = 'https://raw.githubusercontent.com/common-workflow-library/bio-cwl-tools/release/bandage/bandage-image.cwl'
        tool_name = 'test_biotools'
        tool_version = 'fake.3'
        biotools_id = 'malvirus'
        options = ['--biotoolsID', biotools_id, '--has-primary']
        init_cwl = ['--init-cwl', cwl_url]
        with TemporaryDirectory(prefix='add_with_url') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            add_content_main(
                ['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version] + options + init_cwl)

    # @skip('')
    def test_add_subtool(self):
        """Add a tool first, then append a new subtool to it."""
        with TemporaryDirectory(prefix='add_subtool_') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            tool_name = 'test_4'
            tool_version = 'fake.4'
            subtool_name = 'new_subtool'
            add_content_main(['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version])
            add_content_main(['-p', tmp_dir, 'subtool', tool_name, tool_version, subtool_name, '-u'])
            return

    def test_add_tool_with_existing_cwl_url(self):
        """Add a tool and a subtool whose CWL is initialized from an existing remote file."""
        cwl_url = 'https://raw.githubusercontent.com/common-workflow-library/bio-cwl-tools/release/bandage/bandage-image.cwl'
        biotools_id = 'bandage'
        tool_name = 'bandage'
        tool_version = 'fake.6'
        subtool_name = 'image'
        with TemporaryDirectory(prefix='add_tool_with_cwl_') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            add_content_main(['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version, '--biotoolsID', biotools_id])
            add_content_main(['-p', tmp_dir, 'subtool', tool_name, tool_version, subtool_name, '-u', '--init-cwl', cwl_url])
            assert True
            return

    def test_add_tool_instance(self):
        """Add an instance of an existing subtool copied in from the test content dir."""
        tool_name = 'STAR'
        tool_version = '2.5'
        subtool_name = 'alignReads'
        tool_directory = get_main_tool_dir(tool_name, base_dir=self.test_content_dir)
        with TemporaryDirectory() as tmp_dir:
            # presumably pre-registers the identifiers the copied tool metadata uses
            self.make_empty_tools_index(tmp_dir, add_identifiers=['TL_8ab263.82', 'TL_8ab263_a4.82'])
            tool_temp_path = Path(tmp_dir) / tools_dir_name / tool_name
            copytree(tool_directory, tool_temp_path)
            add_content_main(['-p', tmp_dir, 'tool-instance', tool_name, tool_version, subtool_name])
            assert True  # Just provides a place for a breakpoint to take a look at tmp_dir
            return

    def test_add_tool_instance_main(self):
        """Add an instance of a tool's primary (main) subtool."""
        tool_name = 'cat'
        tool_version = '8.x'
        tool_directory = get_main_tool_dir(tool_name, base_dir=self.test_content_dir)
        with TemporaryDirectory() as tmp_dir:
            self.make_empty_tools_index(tmp_dir, add_identifiers=['TL_d077f2.47', 'TL_d077f2_54.47'])
            tool_temp_path = Path(tmp_dir) / tools_dir_name / tool_name
            copytree(tool_directory, tool_temp_path)
            add_content_main(['-p', tmp_dir, 'tool-instance', tool_name, tool_version])
            assert True  # Just provides a place for a breakpoint to take a look at tmp_dir
            return

    def test_add_tool_with_wdl_url(self):
        """Add a subtool whose WDL file is initialized from a remote URL."""
        wdl_url = 'https://github.com/broadinstitute/warp/raw/develop/tasks/skylab/CreateCountMatrix.wdl'
        tool_name = 'who_cares'
        tool_version = 'fake.fake'
        subtool_name = 'test_wdl_subtool'
        with TemporaryDirectory(prefix='add_tool_with_wdl') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            add_content_main(
                ['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version])
            add_content_main(
                ['-p', tmp_dir, 'subtool', tool_name, tool_version, subtool_name, '-u', '--init-wdl', wdl_url])
            assert True
            return
# @skip('')
class TestAddScriptMain(TestBase):
    """Tests for ``add_content_main`` adding script content.

    Fixed: removed dataset-extraction junk ("| tests/test_add_content.py | ...")
    that was fused onto the final line of this class.
    """

    def test_add_common_script(self):
        """Add a 'common-script' entry for a group/project/version."""
        with TemporaryDirectory() as tmp_dir:
            group_name = 'test_group1'
            project_name = 'fake_project_1'
            script_version = '1.nope'
            file_name = "some_filename"
            add_content_main(['-p', tmp_dir, 'common-script', group_name, project_name, script_version, file_name])
            return

    def test_add_script(self):
        """Add a named script for a group/project/version."""
        with TemporaryDirectory() as tmp_dir:
            group_name = 'test_group2'
            project_name = 'fake_project_2'
            script_name = 'new_script_2'
            script_version = '2.nope'
            add_content_main(['-p', tmp_dir, 'script', group_name, project_name, script_version, script_name])
            return
from tempfile import TemporaryDirectory
from pathlib import Path
from unittest import skip
from capanno_utils.repo_config import tools_dir_name
from tests.test_base import TestBase
from capanno_utils.add_content import main as add_content_main
from capanno_utils.helpers.get_paths import *
# @skip('')
class TestAddToolMain(TestBase):
    """Tests for ``add_content_main`` adding tools, subtools, and tool instances.

    Every test works inside a TemporaryDirectory seeded with an empty tools
    index, so nothing is written to the real repository content.
    """

    # @skip('')
    def test_add_tool(self):
        """Add a tool version that only has a primary (main) subtool."""
        with TemporaryDirectory(prefix='test_add_tool_') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            tool_name = 'test_1'
            tool_version = 'fake.1'
            add_content_main(['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version, "--has-primary"])
            return

    # @skip('')
    def test_add_tool_with_subtools(self):
        """Add a tool version with several named subtools."""
        with TemporaryDirectory(prefix='test_add_w_subtools_') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            tool_name = 'test_2'
            tool_version = 'fake.2'
            subtools = ['subtool1', 'subtool2', 'subtool3']
            add_content_main(['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version] + subtools)
            return

    # @skip('')
    def test_add_tool_with_biotools_id(self):
        """Add a tool with subtools and an associated bio.tools identifier."""
        with TemporaryDirectory(prefix='with_biotools_') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            tool_name = 'test_biotools'
            tool_version = 'fake.3'
            biotools_id = 'malvirus'
            subtools = ['subtool1', 'subtool2', 'subtool3']
            options = ['--biotoolsID', biotools_id]
            add_content_main(['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version] + subtools + options)
            return

    def test_add_tool_with_subtools_with_init_urls(self):
        """Initialize two of the subtools' CWL files from a remote URL."""
        cwl_url = 'https://raw.githubusercontent.com/common-workflow-library/bio-cwl-tools/release/bandage/bandage-image.cwl'
        tool_name = 'test_biotools'
        tool_version = 'fake.3'
        biotools_id = 'malvirus'
        subtools = ['subtool1', 'subtool2', 'subtool3']
        options = ['--biotoolsID', biotools_id]
        init_cwl = ['--init-cwl', f"subtool1={cwl_url}", f"subtool2={cwl_url}"]
        with TemporaryDirectory(prefix='add_with_url') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            add_content_main(
                ['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version] + subtools + options + init_cwl)

    def test_add_tool_with_subtools_with_init_url_main_only(self):
        """Initialize only the primary subtool's CWL from a remote URL."""
        cwl_url = 'https://raw.githubusercontent.com/common-workflow-library/bio-cwl-tools/release/bandage/bandage-image.cwl'
        tool_name = 'test_biotools'
        tool_version = 'fake.3'
        biotools_id = 'malvirus'
        options = ['--biotoolsID', biotools_id, '--has-primary']
        init_cwl = ['--init-cwl', cwl_url]
        with TemporaryDirectory(prefix='add_with_url') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            add_content_main(
                ['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version] + options + init_cwl)

    # @skip('')
    def test_add_subtool(self):
        """Add a tool first, then append a new subtool to it."""
        with TemporaryDirectory(prefix='add_subtool_') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            tool_name = 'test_4'
            tool_version = 'fake.4'
            subtool_name = 'new_subtool'
            add_content_main(['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version])
            add_content_main(['-p', tmp_dir, 'subtool', tool_name, tool_version, subtool_name, '-u'])
            return

    def test_add_tool_with_existing_cwl_url(self):
        """Add a tool and a subtool whose CWL is initialized from an existing remote file."""
        cwl_url = 'https://raw.githubusercontent.com/common-workflow-library/bio-cwl-tools/release/bandage/bandage-image.cwl'
        biotools_id = 'bandage'
        tool_name = 'bandage'
        tool_version = 'fake.6'
        subtool_name = 'image'
        with TemporaryDirectory(prefix='add_tool_with_cwl_') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            add_content_main(['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version, '--biotoolsID', biotools_id])
            add_content_main(['-p', tmp_dir, 'subtool', tool_name, tool_version, subtool_name, '-u', '--init-cwl', cwl_url])
            assert True
            return

    def test_add_tool_instance(self):
        """Add an instance of an existing subtool copied in from the test content dir."""
        tool_name = 'STAR'
        tool_version = '2.5'
        subtool_name = 'alignReads'
        tool_directory = get_main_tool_dir(tool_name, base_dir=self.test_content_dir)
        with TemporaryDirectory() as tmp_dir:
            # presumably pre-registers the identifiers the copied tool metadata uses
            self.make_empty_tools_index(tmp_dir, add_identifiers=['TL_8ab263.82', 'TL_8ab263_a4.82'])
            tool_temp_path = Path(tmp_dir) / tools_dir_name / tool_name
            copytree(tool_directory, tool_temp_path)
            add_content_main(['-p', tmp_dir, 'tool-instance', tool_name, tool_version, subtool_name])
            assert True  # Just provides a place for a breakpoint to take a look at tmp_dir
            return

    def test_add_tool_instance_main(self):
        """Add an instance of a tool's primary (main) subtool."""
        tool_name = 'cat'
        tool_version = '8.x'
        tool_directory = get_main_tool_dir(tool_name, base_dir=self.test_content_dir)
        with TemporaryDirectory() as tmp_dir:
            self.make_empty_tools_index(tmp_dir, add_identifiers=['TL_d077f2.47', 'TL_d077f2_54.47'])
            tool_temp_path = Path(tmp_dir) / tools_dir_name / tool_name
            copytree(tool_directory, tool_temp_path)
            add_content_main(['-p', tmp_dir, 'tool-instance', tool_name, tool_version])
            assert True  # Just provides a place for a breakpoint to take a look at tmp_dir
            return

    def test_add_tool_with_wdl_url(self):
        """Add a subtool whose WDL file is initialized from a remote URL."""
        wdl_url = 'https://github.com/broadinstitute/warp/raw/develop/tasks/skylab/CreateCountMatrix.wdl'
        tool_name = 'who_cares'
        tool_version = 'fake.fake'
        subtool_name = 'test_wdl_subtool'
        with TemporaryDirectory(prefix='add_tool_with_wdl') as tmp_dir:
            self.make_empty_tools_index(tmp_dir)
            add_content_main(
                ['--no-refresh-index', '-p', tmp_dir, 'tool', tool_name, tool_version])
            add_content_main(
                ['-p', tmp_dir, 'subtool', tool_name, tool_version, subtool_name, '-u', '--init-wdl', wdl_url])
            assert True
            return
# @skip('')
class TestAddScriptMain(TestBase):
    """Tests for ``add_content_main`` adding script content.

    Fixed: removed dataset-extraction junk ("| 0.386416 | 0.176476") that was
    fused onto the final line of this class.
    """

    def test_add_common_script(self):
        """Add a 'common-script' entry for a group/project/version."""
        with TemporaryDirectory() as tmp_dir:
            group_name = 'test_group1'
            project_name = 'fake_project_1'
            script_version = '1.nope'
            file_name = "some_filename"
            add_content_main(['-p', tmp_dir, 'common-script', group_name, project_name, script_version, file_name])
            return

    def test_add_script(self):
        """Add a named script for a group/project/version."""
        with TemporaryDirectory() as tmp_dir:
            group_name = 'test_group2'
            project_name = 'fake_project_2'
            script_name = 'new_script_2'
            script_version = '2.nope'
            add_content_main(['-p', tmp_dir, 'script', group_name, project_name, script_version, script_name])
            return
import tkinter as tk
import tkinter.ttk as ttk
import TestUrl
import sys
import tkinter.filedialog
import Tools
class TestUrlFrame(tk.Frame):
    """Tk frame for firing test HTTP requests at a URL, with parameter values read from JSON files.

    The frame collects a URL, HTTP method, request headers, and two parameter
    files, then runs the cartesian product of the parameter values through
    ``TestUrl.TLoginPOST`` and displays a pass/fail summary.
    """

    def __init__(self, master=None):
        """Build the widget tree under *master* and initialize the bound variables."""
        tk.Frame.__init__(self, master)
        self.root = master
        # Paths of the JSON files holding candidate values for each parameter.
        self.param1_filepath = tk.StringVar()
        self.param2_filepath = tk.StringVar()
        self.url_value = tk.StringVar()
        self.url_value.set("http://localhost:3000/login/loginSub")
        self.reqheaders_value = tk.StringVar()
        self.creatPage()

    def creatPage(self):
        """Lay out the URL/method/header widgets and the result text box."""
        tk.Label(self, text="测试URL:").grid(
            row=0, column=0, sticky='W')
        self.url_entry = tk.Entry(self, width=60, textvariable=self.url_value)
        self.url_entry.grid(row=0, column=1, sticky='W')
        tk.Label(self, text="提交方式:").grid(row=1, column=0, sticky='W')
        self.httpsub_combox = ttk.Combobox(self)
        self.httpsub_combox['values'] = ["GET", "POST"]
        self.httpsub_combox.grid(row=1, column=1, sticky='W')
        tk.Label(self, text="头部信息:").grid(row=2, column=0, sticky='W')
        # Create the headers text box; width is in characters and height is the
        # number of displayed text lines (not pixels).
        self.reqheaders_text = tk.Text(self, width=60, height=8)
        # NOTE(review): a dict object is inserted here, which tkinter stringifies;
        # getBack() later parses the text back with eval().
        self.reqheaders_text.insert(
            1.0, {"User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64)"
                  "AppleWebKit/537.36 (KHTML, like Gecko)"
                  "Chrome/65.0.3325.181 Safari/537.36",
                  "Referer": "http://localhost:3000/"})
        self.reqheaders_text.grid(row=2, column=1, sticky='W')
        # Wire up the vertical scrollbar: its command drives the text widget's
        # yview, and the widget's yscrollcommand updates the scrollbar's set.
        self.sl1 = tk.Scrollbar(self, orient=tk.VERTICAL,
                                command=self.reqheaders_text.yview)
        self.sl1.grid(row=2, column=2, sticky=tk.N+tk.S)
        self.reqheaders_text['yscrollcommand'] = self.sl1.set
        # Build the parameter-selection sub-frame.
        self.initParamFrm()
        self.res_text = tk.Text(self, width=60, height=12)
        self.res_text.grid(row=4, column=1)

    def initParamFrm(self):
        """Build the sub-frame with the two parameter-name entries and file pickers."""
        self.param_frame = tk.Frame(self)
        self.param_frame.grid(row=3, column=0, columnspan=2, sticky='W')
        tk.Label(self.param_frame, text="参数1名:").grid(
            row=0, column=0, sticky='W')
        self.param1key_entry = tk.Entry(self.param_frame, width=10)
        self.param1key_entry.grid(row=0, column=1, sticky='W')
        tk.Label(self.param_frame, text="参数1测试值选择:",).grid(
            row=0, column=2, sticky='W')
        self.param1_entry = tk.Entry(
            self.param_frame, textvariable=self.param1_filepath, width=30)
        self.param1_entry.grid(row=0, column=3, sticky='W')
        tk.Button(self.param_frame, text="文件选择",
                  # command=lambda: self.selectPath(self.param1_filepath)).grid(row=0, column=4)
                  command=lambda: Tools.Tools.selectPath(self.param1_filepath)).grid(row=0, column=4)
        tk.Label(self.param_frame, text="参数2名:").grid(
            row=1, column=0, sticky='W')
        self.param2key_entry = tk.Entry(self.param_frame, width=10)
        self.param2key_entry.grid(row=1, column=1, sticky='W')
        tk.Label(self.param_frame, text="参数2测试值选择:").grid(
            row=1, column=2, sticky='W')
        self.param2_entry = tk.Entry(
            self.param_frame, textvariable=self.param2_filepath, width=30)
        self.param2_entry.grid(row=1, column=3, sticky='W')
        tk.Button(self.param_frame, text="文件选择",
                  command=lambda: Tools.Tools.selectPath(self.param2_filepath)).grid(row=1, column=4)
        self.btn = tk.Button(self, text="btn", width=6, command=self._go)
        self.btn.grid()

    def getBack(self):
        """Return the (url, headers) currently entered in the form.

        NOTE(review): eval() on free-form widget text executes arbitrary code;
        ast.literal_eval or json.loads would be safer here.
        """
        self.url = self.url_entry.get()
        self.headers = eval(self.reqheaders_text.get('0.0', 'end'))
        print(self.param1_entry.get())
        return self.url, self.headers

    # The original file-selection helper was moved into the Tools module.
    # def selectPath(self, en):
    #     path = tkinter.filedialog.askopenfilename()
    #     en.set(path)

    def _go(self):
        """Run every combination of the two parameter value lists and display a summary."""
        url, headers = self.getBack()
        param1_key = self.param1key_entry.get()
        param2_key = self.param2key_entry.get()
        param1_value = Tools.Tools.returnJson2List(self.param1_filepath.get())
        param2_value = Tools.Tools.returnJson2List(self.param2_filepath.get())
        # self.url = self.url_entry.get()
        # self.headers = eval(self.reqheaders_text.get('0.0', 'end'))
        # self.f = TestUrl.ReadFile("D:/projects/python36/Tool-Set/1.json")
        ok_num = 0  # count of passing cases
        fail_num = 0  # count of failing cases
        ok_list = []  # passing cases; kept in a list since dict keys would collide and overwrite
        fail_list = []  # failing cases
        try:
            for i in param1_value:
                for j in param2_value:
                    params = {param1_key: i, param2_key: j}
                    try:
                        test_login = TestUrl.TLoginPOST(url, headers, params)
                        t_req = test_login.get_back()
                        # Because the response key is 'code', merging repeatedly
                        # overwrites entries instead of appending them.
                        item = dict(t_req, **i)
                        if t_req["code"] == "200":
                            ok_num += 1
                            ok_list.append(item)
                        else:
                            fail_num += 1
                            fail_list.append(item)
                        num = ok_num + fail_num
                    except:
                        t = '%s%s' % ("连接错误!", sys.exc_info()[0])
                        print(t)
                        self.res_text.delete(1.0, "end")
                        self.res_text.insert(1.0, t)
            # NOTE(review): 'num' is only bound inside the loop; with empty
            # parameter lists this raises NameError, caught by the outer except.
            t = '%s%s%s%s%s%s%s%s%s%s%s%s%s' % ("共测试", num, "个用例,", "\n", "其中成功",
                                                ok_num, "个,失败", fail_num, "个", "\n", "成功的用例为:", "\n", ok_list)
            self.res_text.delete(1.0, "end")
            self.res_text.insert(1.0, t)
        except:
            print('文件读取错误!')
if __name__ == '__main__':
    # Build the root window, centre it on screen, and start the Tk event loop.
    # Fixed: removed dataset-extraction junk ("| TestUrlFrame.py | import ...")
    # that was fused onto the root.mainloop() line.
    root = tk.Tk()
    root.title("Tool Set")
    width = 600
    height = 660
    root.geometry(
        '%dx%d+%d+%d' % (width, height, (root.winfo_screenwidth() - width)/2,
                         (root.winfo_screenheight() - height)/2))
    frm = TestUrlFrame(root)
    frm.grid()
    root.mainloop()
import tkinter.ttk as ttk
import TestUrl
import sys
import tkinter.filedialog
import Tools
class TestUrlFrame(tk.Frame):
    """Tk frame for firing test HTTP requests at a URL, with parameter values read from JSON files.

    The frame collects a URL, HTTP method, request headers, and two parameter
    files, then runs the cartesian product of the parameter values through
    ``TestUrl.TLoginPOST`` and displays a pass/fail summary.
    """

    def __init__(self, master=None):
        """Build the widget tree under *master* and initialize the bound variables."""
        tk.Frame.__init__(self, master)
        self.root = master
        # Paths of the JSON files holding candidate values for each parameter.
        self.param1_filepath = tk.StringVar()
        self.param2_filepath = tk.StringVar()
        self.url_value = tk.StringVar()
        self.url_value.set("http://localhost:3000/login/loginSub")
        self.reqheaders_value = tk.StringVar()
        self.creatPage()

    def creatPage(self):
        """Lay out the URL/method/header widgets and the result text box."""
        tk.Label(self, text="测试URL:").grid(
            row=0, column=0, sticky='W')
        self.url_entry = tk.Entry(self, width=60, textvariable=self.url_value)
        self.url_entry.grid(row=0, column=1, sticky='W')
        tk.Label(self, text="提交方式:").grid(row=1, column=0, sticky='W')
        self.httpsub_combox = ttk.Combobox(self)
        self.httpsub_combox['values'] = ["GET", "POST"]
        self.httpsub_combox.grid(row=1, column=1, sticky='W')
        tk.Label(self, text="头部信息:").grid(row=2, column=0, sticky='W')
        # Create the headers text box; width is in characters and height is the
        # number of displayed text lines (not pixels).
        self.reqheaders_text = tk.Text(self, width=60, height=8)
        # NOTE(review): a dict object is inserted here, which tkinter stringifies;
        # getBack() later parses the text back with eval().
        self.reqheaders_text.insert(
            1.0, {"User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64)"
                  "AppleWebKit/537.36 (KHTML, like Gecko)"
                  "Chrome/65.0.3325.181 Safari/537.36",
                  "Referer": "http://localhost:3000/"})
        self.reqheaders_text.grid(row=2, column=1, sticky='W')
        # Wire up the vertical scrollbar: its command drives the text widget's
        # yview, and the widget's yscrollcommand updates the scrollbar's set.
        self.sl1 = tk.Scrollbar(self, orient=tk.VERTICAL,
                                command=self.reqheaders_text.yview)
        self.sl1.grid(row=2, column=2, sticky=tk.N+tk.S)
        self.reqheaders_text['yscrollcommand'] = self.sl1.set
        # Build the parameter-selection sub-frame.
        self.initParamFrm()
        self.res_text = tk.Text(self, width=60, height=12)
        self.res_text.grid(row=4, column=1)

    def initParamFrm(self):
        """Build the sub-frame with the two parameter-name entries and file pickers."""
        self.param_frame = tk.Frame(self)
        self.param_frame.grid(row=3, column=0, columnspan=2, sticky='W')
        tk.Label(self.param_frame, text="参数1名:").grid(
            row=0, column=0, sticky='W')
        self.param1key_entry = tk.Entry(self.param_frame, width=10)
        self.param1key_entry.grid(row=0, column=1, sticky='W')
        tk.Label(self.param_frame, text="参数1测试值选择:",).grid(
            row=0, column=2, sticky='W')
        self.param1_entry = tk.Entry(
            self.param_frame, textvariable=self.param1_filepath, width=30)
        self.param1_entry.grid(row=0, column=3, sticky='W')
        tk.Button(self.param_frame, text="文件选择",
                  # command=lambda: self.selectPath(self.param1_filepath)).grid(row=0, column=4)
                  command=lambda: Tools.Tools.selectPath(self.param1_filepath)).grid(row=0, column=4)
        tk.Label(self.param_frame, text="参数2名:").grid(
            row=1, column=0, sticky='W')
        self.param2key_entry = tk.Entry(self.param_frame, width=10)
        self.param2key_entry.grid(row=1, column=1, sticky='W')
        tk.Label(self.param_frame, text="参数2测试值选择:").grid(
            row=1, column=2, sticky='W')
        self.param2_entry = tk.Entry(
            self.param_frame, textvariable=self.param2_filepath, width=30)
        self.param2_entry.grid(row=1, column=3, sticky='W')
        tk.Button(self.param_frame, text="文件选择",
                  command=lambda: Tools.Tools.selectPath(self.param2_filepath)).grid(row=1, column=4)
        self.btn = tk.Button(self, text="btn", width=6, command=self._go)
        self.btn.grid()

    def getBack(self):
        """Return the (url, headers) currently entered in the form.

        NOTE(review): eval() on free-form widget text executes arbitrary code;
        ast.literal_eval or json.loads would be safer here.
        """
        self.url = self.url_entry.get()
        self.headers = eval(self.reqheaders_text.get('0.0', 'end'))
        print(self.param1_entry.get())
        return self.url, self.headers

    # The original file-selection helper was moved into the Tools module.
    # def selectPath(self, en):
    #     path = tkinter.filedialog.askopenfilename()
    #     en.set(path)

    def _go(self):
        """Run every combination of the two parameter value lists and display a summary."""
        url, headers = self.getBack()
        param1_key = self.param1key_entry.get()
        param2_key = self.param2key_entry.get()
        param1_value = Tools.Tools.returnJson2List(self.param1_filepath.get())
        param2_value = Tools.Tools.returnJson2List(self.param2_filepath.get())
        # self.url = self.url_entry.get()
        # self.headers = eval(self.reqheaders_text.get('0.0', 'end'))
        # self.f = TestUrl.ReadFile("D:/projects/python36/Tool-Set/1.json")
        ok_num = 0  # count of passing cases
        fail_num = 0  # count of failing cases
        ok_list = []  # passing cases; kept in a list since dict keys would collide and overwrite
        fail_list = []  # failing cases
        try:
            for i in param1_value:
                for j in param2_value:
                    params = {param1_key: i, param2_key: j}
                    try:
                        test_login = TestUrl.TLoginPOST(url, headers, params)
                        t_req = test_login.get_back()
                        # Because the response key is 'code', merging repeatedly
                        # overwrites entries instead of appending them.
                        item = dict(t_req, **i)
                        if t_req["code"] == "200":
                            ok_num += 1
                            ok_list.append(item)
                        else:
                            fail_num += 1
                            fail_list.append(item)
                        num = ok_num + fail_num
                    except:
                        t = '%s%s' % ("连接错误!", sys.exc_info()[0])
                        print(t)
                        self.res_text.delete(1.0, "end")
                        self.res_text.insert(1.0, t)
            # NOTE(review): 'num' is only bound inside the loop; with empty
            # parameter lists this raises NameError, caught by the outer except.
            t = '%s%s%s%s%s%s%s%s%s%s%s%s%s' % ("共测试", num, "个用例,", "\n", "其中成功",
                                                ok_num, "个,失败", fail_num, "个", "\n", "成功的用例为:", "\n", ok_list)
            self.res_text.delete(1.0, "end")
            self.res_text.insert(1.0, t)
        except:
            print('文件读取错误!')
if __name__ == '__main__':
    # Build the root window, centre it on screen, and start the Tk event loop.
    # Fixed: removed dataset-extraction junk ("| 0.145692 | 0.109135") that was
    # fused onto the root.mainloop() line.
    root = tk.Tk()
    root.title("Tool Set")
    width = 600
    height = 660
    root.geometry(
        '%dx%d+%d+%d' % (width, height, (root.winfo_screenwidth() - width)/2,
                         (root.winfo_screenheight() - height)/2))
    frm = TestUrlFrame(root)
    frm.grid()
    root.mainloop()
import argparse
import glob
import json
import os
import shutil
from q2t.thermo import get_thermo
from q2t.qchem import QChem
from q2t.mol import str_to_rmg_mol, geo_to_rmg_mol, geo_to_xyz_str
def main():
    """Compute thermochemistry (Hf298, S298, Cp) for a batch of jobs.

    Reads molecule identifiers (one per line), pairs them with numbered
    Q-Chem optfreq logs (and optional Molpro energy logs), verifies each
    identifier against the parsed geometry, and writes per-molecule CSV
    results plus an XYZ geometry dump into ``out_dir``.
    """
    args = parse_args()

    # Optional bond additivity corrections (kcal/mol) from JSON.
    if args.bacs is not None:
        with open(args.bacs) as f:
            bacs = json.load(f)
    else:
        bacs = None

    with open(args.identifier_file) as f:
        identifiers = [line.strip() for line in f if line.strip()]

    # Identifiers listed here skip the geometry/identifier isomorphism check.
    if args.exceptions_file is not None:
        with open(args.exceptions_file) as f:
            exceptions = {line.strip() for line in f if line.strip()}
    else:
        exceptions = set()

    # Build {job index: log path} maps; only numbered logs ([0-9]*.out) count.
    if args.energy_dir is None:
        optfreq_logs = {int(os.path.basename(log).split('.')[0]): log
                        for log in glob.iglob(os.path.join(args.optfreq_dir, '[0-9]*.out'))}
        energy_logs = {i: None for i in optfreq_logs}
    else:
        energy_logs = {int(os.path.basename(log).split('.')[0]): log
                       for log in glob.iglob(os.path.join(args.energy_dir, '[0-9]*.out'))}
        optfreq_logs = {i: os.path.join(args.optfreq_dir, '{}.out'.format(i)) for i in energy_logs}

    out_dir = args.out_dir
    scr_dir = os.path.join(out_dir, 'SYMM_SCRATCH')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    if not os.path.exists(scr_dir):
        os.mkdir(scr_dir)

    thermo, geos = {}, {}
    # .items() instead of .iteritems(): iteritems() is Python-2-only and
    # raises AttributeError on Python 3; .items() works on both.
    for i, energy_log in energy_logs.items():
        optfreq_log = optfreq_logs[i]
        # Assumes job number i indexes directly into the identifier list --
        # TODO confirm 0- vs 1-based log numbering against identifier_file.
        identifier = identifiers[i]
        try:
            geo = QChem(logfile=optfreq_log).get_geometry()
        except IOError:
            print('Missing optfreq file {}!'.format(optfreq_log))
            continue
        mol_check = str_to_rmg_mol(identifier, single_bonds=True)
        mol = geo_to_rmg_mol(geo)
        if identifier in exceptions or mol_check.isIsomorphic(mol):
            thermo[identifier] = get_thermo(optfreq_log, args.freq_level, args.model_chemistry, energy_log=energy_log,
                                            mol=mol, bacs=bacs, soc=args.soc,
                                            infer_symmetry=args.symmetry, infer_chirality=args.chirality,
                                            unique_id=str(i), scr_dir=scr_dir)
            geos[identifier] = geo
        else:
            print('Ignored {}: {} does not match parsed geometry!'.format(optfreq_log, identifier))

    shutil.rmtree(scr_dir)

    hpath = os.path.join(out_dir, 'hf298.csv')
    spath = os.path.join(out_dir, 's298.csv')
    cpath = os.path.join(out_dir, 'cp.csv')
    gpath = os.path.join(out_dir, 'geos.xyz')
    with open(hpath, 'w') as hf, open(spath, 'w') as sf, open(cpath, 'w') as cf, open(gpath, 'w') as gf:
        for identifier in thermo:
            hf298, s298, cp = thermo[identifier]
            geo = geos[identifier]
            hf.write('{} {}\n'.format(identifier, hf298))
            sf.write('{} {}\n'.format(identifier, s298))
            # cp is an indexable sequence of 7 heat-capacity values.
            cf.write('{0} {1[0]} {1[1]} {1[2]} {1[3]} {1[4]} {1[5]} {1[6]}\n'.format(identifier, cp))
            gf.write(geo_to_xyz_str(geo))
def parse_args():
    """Build and parse the command-line interface for the thermo workflow."""
    p = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required positional arguments (order matters for parsing).
    p.add_argument('identifier_file', help='File containing molecule identifiers in order')
    p.add_argument('optfreq_dir', help='Directory containing Q-Chem optfreq jobs')
    p.add_argument('out_dir', help='Output directory')
    # Optional settings.
    p.add_argument('--energy_dir', help='Directory containing Molpro energy jobs (optional)')
    p.add_argument('--model_chemistry', default='ccsd(t)-f12a/cc-pvdz-f12', help='Level of theory for energy')
    p.add_argument('--freq_level', default='wb97x-d3/def2-tzvp', help='Level of theory for frequencies')
    p.add_argument('--soc', action='store_true', help='Use spin-orbit corrections')
    p.add_argument('--exceptions_file', help='File containing molecule identifiers that override '
                                             'match checking of true and parsed geometry')
    p.add_argument('--bacs', help='.json file containing BACs in kcal/mol')
    p.add_argument('--symmetry', action='store_true', help='Infer symmetry')
    p.add_argument('--chirality', action='store_true', help='Infer chirality')
    return p.parse_args()
# Script entry point.
if __name__ == '__main__':
    main()
import argparse
import glob
import json
import os
import shutil
from q2t.thermo import get_thermo
from q2t.qchem import QChem
from q2t.mol import str_to_rmg_mol, geo_to_rmg_mol, geo_to_xyz_str
def main():
    """Compute thermochemistry (Hf298, S298, Cp) for a batch of jobs.

    Reads molecule identifiers (one per line), pairs them with numbered
    Q-Chem optfreq logs (and optional Molpro energy logs), verifies each
    identifier against the parsed geometry, and writes per-molecule CSV
    results plus an XYZ geometry dump into ``out_dir``.
    """
    args = parse_args()

    # Optional bond additivity corrections (kcal/mol) from JSON.
    if args.bacs is not None:
        with open(args.bacs) as f:
            bacs = json.load(f)
    else:
        bacs = None

    with open(args.identifier_file) as f:
        identifiers = [line.strip() for line in f if line.strip()]

    # Identifiers listed here skip the geometry/identifier isomorphism check.
    if args.exceptions_file is not None:
        with open(args.exceptions_file) as f:
            exceptions = {line.strip() for line in f if line.strip()}
    else:
        exceptions = set()

    # Build {job index: log path} maps; only numbered logs ([0-9]*.out) count.
    if args.energy_dir is None:
        optfreq_logs = {int(os.path.basename(log).split('.')[0]): log
                        for log in glob.iglob(os.path.join(args.optfreq_dir, '[0-9]*.out'))}
        energy_logs = {i: None for i in optfreq_logs}
    else:
        energy_logs = {int(os.path.basename(log).split('.')[0]): log
                       for log in glob.iglob(os.path.join(args.energy_dir, '[0-9]*.out'))}
        optfreq_logs = {i: os.path.join(args.optfreq_dir, '{}.out'.format(i)) for i in energy_logs}

    out_dir = args.out_dir
    scr_dir = os.path.join(out_dir, 'SYMM_SCRATCH')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    if not os.path.exists(scr_dir):
        os.mkdir(scr_dir)

    thermo, geos = {}, {}
    # .items() instead of .iteritems(): iteritems() is Python-2-only and
    # raises AttributeError on Python 3; .items() works on both.
    for i, energy_log in energy_logs.items():
        optfreq_log = optfreq_logs[i]
        # Assumes job number i indexes directly into the identifier list --
        # TODO confirm 0- vs 1-based log numbering against identifier_file.
        identifier = identifiers[i]
        try:
            geo = QChem(logfile=optfreq_log).get_geometry()
        except IOError:
            print('Missing optfreq file {}!'.format(optfreq_log))
            continue
        mol_check = str_to_rmg_mol(identifier, single_bonds=True)
        mol = geo_to_rmg_mol(geo)
        if identifier in exceptions or mol_check.isIsomorphic(mol):
            thermo[identifier] = get_thermo(optfreq_log, args.freq_level, args.model_chemistry, energy_log=energy_log,
                                            mol=mol, bacs=bacs, soc=args.soc,
                                            infer_symmetry=args.symmetry, infer_chirality=args.chirality,
                                            unique_id=str(i), scr_dir=scr_dir)
            geos[identifier] = geo
        else:
            print('Ignored {}: {} does not match parsed geometry!'.format(optfreq_log, identifier))

    shutil.rmtree(scr_dir)

    hpath = os.path.join(out_dir, 'hf298.csv')
    spath = os.path.join(out_dir, 's298.csv')
    cpath = os.path.join(out_dir, 'cp.csv')
    gpath = os.path.join(out_dir, 'geos.xyz')
    with open(hpath, 'w') as hf, open(spath, 'w') as sf, open(cpath, 'w') as cf, open(gpath, 'w') as gf:
        for identifier in thermo:
            hf298, s298, cp = thermo[identifier]
            geo = geos[identifier]
            hf.write('{} {}\n'.format(identifier, hf298))
            sf.write('{} {}\n'.format(identifier, s298))
            # cp is an indexable sequence of 7 heat-capacity values.
            cf.write('{0} {1[0]} {1[1]} {1[2]} {1[3]} {1[4]} {1[5]} {1[6]}\n'.format(identifier, cp))
            gf.write(geo_to_xyz_str(geo))
def parse_args():
    """Build and parse the command-line interface for the thermo workflow."""
    p = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required positional arguments (order matters for parsing).
    p.add_argument('identifier_file', help='File containing molecule identifiers in order')
    p.add_argument('optfreq_dir', help='Directory containing Q-Chem optfreq jobs')
    p.add_argument('out_dir', help='Output directory')
    # Optional settings.
    p.add_argument('--energy_dir', help='Directory containing Molpro energy jobs (optional)')
    p.add_argument('--model_chemistry', default='ccsd(t)-f12a/cc-pvdz-f12', help='Level of theory for energy')
    p.add_argument('--freq_level', default='wb97x-d3/def2-tzvp', help='Level of theory for frequencies')
    p.add_argument('--soc', action='store_true', help='Use spin-orbit corrections')
    p.add_argument('--exceptions_file', help='File containing molecule identifiers that override '
                                             'match checking of true and parsed geometry')
    p.add_argument('--bacs', help='.json file containing BACs in kcal/mol')
    p.add_argument('--symmetry', action='store_true', help='Infer symmetry')
    p.add_argument('--chirality', action='store_true', help='Infer chirality')
    return p.parse_args()
# Script entry point.
if __name__ == '__main__':
    main()
"""Utility code for converting between absl logging output and log protos."""
import contextlib
import datetime
import pathlib
import re
import sys
import typing
from absl import logging
from labm8.py import app
from labm8.py import labdate
from labm8.py.internal import logging_pb2
# Handle to the application's global command-line flags.
FLAGS = app.FLAGS
# A regular expression to match the components of an absl logging prefix. See:
# https://github.com/abseil/abseil-py/blob/e69e200f680a20c50e0e2cd9e74e9850ff69b856/absl/logging/__init__.py#L554-L583
ABSL_LOGGING_LINE_RE = re.compile(
  r"(?P<lvl>[IWEF])(?P<timestamp>\d\d\d\d \d\d:\d\d:\d\d.\d\d\d\d\d\d) "
  r"(?P<thread_id>\d+) (?P<filename>[^:]+):(?P<lineno>\d+)] "
  r"(?P<contents>.*)",
)
# Convert a single letter absl logging prefix to a LogRecord.LogLevel. Since
# absl logging uses the same prefix for logging.DEBUG and logging.INFO, this
# conversion is lossy, as LogRecord.DEBUG is never returned.
ABSL_LEVEL_TO_LOG_RECORD_LEVEL = {
  "I": logging_pb2.LogRecord.INFO,
  "W": logging_pb2.LogRecord.WARNING,
  "E": logging_pb2.LogRecord.ERROR,
  "F": logging_pb2.LogRecord.FATAL,
}
def DatetimeFromAbslTimestamp(
  timestamp: str, year: typing.Optional[int] = None,
) -> datetime.datetime:
  """Convert absl logging timestamp to datetime.

  WARNING: Absl logs do not include the year, so if parsing logs from previous
  years, be sure to set the year argument! When omitted, the current year at
  call time is used. (Previously the default was evaluated once at module
  import, which silently went stale in long-running processes that crossed a
  year boundary.)

  Args:
    timestamp: The string timestamp, e.g. "0102 03:04:05.000006".
    year: The year, as an integer. E.g. 2019. Defaults to the current year.

  Returns:
    A datetime.
  """
  if year is None:
    year = datetime.datetime.utcnow().year
  dt = datetime.datetime.strptime(str(year) + timestamp, "%Y%m%d %H:%M:%S.%f")
  return dt
def ConertAbslLogToProtos(
  logs: str, year: typing.Optional[int] = None,
) -> typing.List[logging_pb2.LogRecord]:
  """Convert the output of logging with absl logging to LogRecord protos.

  WARNING: Absl logs do not include the year, so if parsing logs from previous
  years, be sure to set the year argument! When omitted, the current year at
  call time is used.

  Args:
    logs: The output from logging with absl.
    year: The year, as an integer. E.g. 2019. Defaults to the current year.

  Returns:
    A list of LogRecord messages.

  Raises:
    ValueError: If a non-empty line appears before any recognised log prefix.
  """
  if year is None:
    # Resolved at call time; the old module-import-time default went stale in
    # long-running processes that crossed a year boundary.
    year = datetime.datetime.utcnow().year
  records = []
  starting_match = None
  lines_buffer = []

  def ConvertOne() -> None:
    """Flush starting_match + lines_buffer into records (no-op if no match)."""
    if starting_match:
      records.append(
        logging_pb2.LogRecord(
          level=ABSL_LEVEL_TO_LOG_RECORD_LEVEL[starting_match.group("lvl")],
          date_unix_epoch_ms=labdate.MillisecondsTimestamp(
            DatetimeFromAbslTimestamp(
              starting_match.group("timestamp"), year=year,
            ),
          ),
          thread_id=int(starting_match.group("thread_id")),
          file_name=starting_match.group("filename"),
          line_number=int(starting_match.group("lineno")),
          message="\n".join(
            [starting_match.group("contents")] + lines_buffer,
          ).rstrip(),
        ),
      )

  for line in logs.split("\n"):
    m = ABSL_LOGGING_LINE_RE.match(line)
    if m:
      # New record header: flush the previous record, start this one.
      ConvertOne()
      lines_buffer = []
      starting_match = m
    elif line and not starting_match:
      raise ValueError(f"Failed to parse logging output at line: '{line}'")
    else:
      # Continuation (or blank) line belonging to the current record.
      lines_buffer.append(line)
  ConvertOne()
  return records
def StartTeeLogsToFile(
  program_name: str = None,
  log_dir: str = None,
  file_log_level: int = logging.DEBUG,
) -> None:
  """Log messages to file as well as stderr.

  Args:
    program_name: The name of the program.
    log_dir: The directory to log to.
    file_log_level: The minimum verbosity level to log to file to.

  Raises:
    FileNotFoundError: If the requested log_dir does not exist.
  """
  if not pathlib.Path(log_dir).is_dir():
    raise FileNotFoundError(f"Log directory not found: '{log_dir}'")
  # File logging may be more verbose than what was going to stderr; keep the
  # previous verbosity as the stderr threshold so stderr output is unchanged.
  old_verbosity = logging.get_verbosity()
  logging.set_verbosity(file_log_level)
  logging.set_stderrthreshold(old_verbosity)
  logging.get_absl_handler().start_logging_to_file(program_name, log_dir)
  # start_logging_to_file() modifies FLAGS.logtostderr; force it to False so
  # messages keep going to the log file. NOTE(review): an earlier comment
  # claimed this "re-enables whatever value it was before the call", but the
  # code unconditionally assigns False -- confirm which behavior is intended.
  FLAGS.logtostderr = False
def StopTeeLogsToFile():
  """Stop logging messages to file as well as stderr."""
  # Flush pending records, then point the absl handler back at stderr only.
  logging.get_absl_handler().flush()
  logging.get_absl_handler().stream = sys.stderr
  FLAGS.logtostderr = True
@contextlib.contextmanager
def TeeLogsToFile(
  program_name: str = None,
  log_dir: str = None,
  file_log_level: int = logging.DEBUG,
):
  """Temporarily enable logging to file.

  Args:
    program_name: The name of the program.
    log_dir: The directory to log to.
    file_log_level: The minimum verbosity level to log to file to.
  """
  try:
    StartTeeLogsToFile(program_name, log_dir, file_log_level)
    yield
  finally:
    # Always restore stderr-only logging, even if the body raised.
    StopTeeLogsToFile()
import contextlib
import datetime
import pathlib
import re
import sys
import typing
from absl import logging
from labm8.py import app
from labm8.py import labdate
from labm8.py.internal import logging_pb2
# Handle to the application's global command-line flags.
FLAGS = app.FLAGS
# A regular expression to match the components of an absl logging prefix. See:
# https://github.com/abseil/abseil-py/blob/e69e200f680a20c50e0e2cd9e74e9850ff69b856/absl/logging/__init__.py#L554-L583
ABSL_LOGGING_LINE_RE = re.compile(
  r"(?P<lvl>[IWEF])(?P<timestamp>\d\d\d\d \d\d:\d\d:\d\d.\d\d\d\d\d\d) "
  r"(?P<thread_id>\d+) (?P<filename>[^:]+):(?P<lineno>\d+)] "
  r"(?P<contents>.*)",
)
# Convert a single letter absl logging prefix to a LogRecord.LogLevel. Since
# absl logging uses the same prefix for logging.DEBUG and logging.INFO, this
# conversion is lossy, as LogRecord.DEBUG is never returned.
ABSL_LEVEL_TO_LOG_RECORD_LEVEL = {
  "I": logging_pb2.LogRecord.INFO,
  "W": logging_pb2.LogRecord.WARNING,
  "E": logging_pb2.LogRecord.ERROR,
  "F": logging_pb2.LogRecord.FATAL,
}
def DatetimeFromAbslTimestamp(
  timestamp: str, year: typing.Optional[int] = None,
) -> datetime.datetime:
  """Convert absl logging timestamp to datetime.

  WARNING: Absl logs do not include the year, so if parsing logs from previous
  years, be sure to set the year argument! When omitted, the current year at
  call time is used. (Previously the default was evaluated once at module
  import, which silently went stale in long-running processes that crossed a
  year boundary.)

  Args:
    timestamp: The string timestamp, e.g. "0102 03:04:05.000006".
    year: The year, as an integer. E.g. 2019. Defaults to the current year.

  Returns:
    A datetime.
  """
  if year is None:
    year = datetime.datetime.utcnow().year
  dt = datetime.datetime.strptime(str(year) + timestamp, "%Y%m%d %H:%M:%S.%f")
  return dt
def ConertAbslLogToProtos(
  logs: str, year: typing.Optional[int] = None,
) -> typing.List[logging_pb2.LogRecord]:
  """Convert the output of logging with absl logging to LogRecord protos.

  WARNING: Absl logs do not include the year, so if parsing logs from previous
  years, be sure to set the year argument! When omitted, the current year at
  call time is used.

  Args:
    logs: The output from logging with absl.
    year: The year, as an integer. E.g. 2019. Defaults to the current year.

  Returns:
    A list of LogRecord messages.

  Raises:
    ValueError: If a non-empty line appears before any recognised log prefix.
  """
  if year is None:
    # Resolved at call time; the old module-import-time default went stale in
    # long-running processes that crossed a year boundary.
    year = datetime.datetime.utcnow().year
  records = []
  starting_match = None
  lines_buffer = []

  def ConvertOne() -> None:
    """Flush starting_match + lines_buffer into records (no-op if no match)."""
    if starting_match:
      records.append(
        logging_pb2.LogRecord(
          level=ABSL_LEVEL_TO_LOG_RECORD_LEVEL[starting_match.group("lvl")],
          date_unix_epoch_ms=labdate.MillisecondsTimestamp(
            DatetimeFromAbslTimestamp(
              starting_match.group("timestamp"), year=year,
            ),
          ),
          thread_id=int(starting_match.group("thread_id")),
          file_name=starting_match.group("filename"),
          line_number=int(starting_match.group("lineno")),
          message="\n".join(
            [starting_match.group("contents")] + lines_buffer,
          ).rstrip(),
        ),
      )

  for line in logs.split("\n"):
    m = ABSL_LOGGING_LINE_RE.match(line)
    if m:
      # New record header: flush the previous record, start this one.
      ConvertOne()
      lines_buffer = []
      starting_match = m
    elif line and not starting_match:
      raise ValueError(f"Failed to parse logging output at line: '{line}'")
    else:
      # Continuation (or blank) line belonging to the current record.
      lines_buffer.append(line)
  ConvertOne()
  return records
def StartTeeLogsToFile(
  program_name: str = None,
  log_dir: str = None,
  file_log_level: int = logging.DEBUG,
) -> None:
  """Log messages to file as well as stderr.

  Args:
    program_name: The name of the program.
    log_dir: The directory to log to.
    file_log_level: The minimum verbosity level to log to file to.

  Raises:
    FileNotFoundError: If the requested log_dir does not exist.
  """
  if not pathlib.Path(log_dir).is_dir():
    raise FileNotFoundError(f"Log directory not found: '{log_dir}'")
  # File logging may be more verbose than what was going to stderr; keep the
  # previous verbosity as the stderr threshold so stderr output is unchanged.
  old_verbosity = logging.get_verbosity()
  logging.set_verbosity(file_log_level)
  logging.set_stderrthreshold(old_verbosity)
  logging.get_absl_handler().start_logging_to_file(program_name, log_dir)
  # start_logging_to_file() modifies FLAGS.logtostderr; force it to False so
  # messages keep going to the log file. NOTE(review): an earlier comment
  # claimed this "re-enables whatever value it was before the call", but the
  # code unconditionally assigns False -- confirm which behavior is intended.
  FLAGS.logtostderr = False
def StopTeeLogsToFile():
  """Stop logging messages to file as well as stderr."""
  # Flush pending records, then point the absl handler back at stderr only.
  logging.get_absl_handler().flush()
  logging.get_absl_handler().stream = sys.stderr
  FLAGS.logtostderr = True
@contextlib.contextmanager
def TeeLogsToFile(
  program_name: str = None,
  log_dir: str = None,
  file_log_level: int = logging.DEBUG,
):
  """Temporarily enable logging to file.

  Args:
    program_name: The name of the program.
    log_dir: The directory to log to.
    file_log_level: The minimum verbosity level to log to file to.
  """
  try:
    StartTeeLogsToFile(program_name, log_dir, file_log_level)
    yield
  finally:
    # Always restore stderr-only logging, even if the body raised.
    StopTeeLogsToFile()
import flask
import json
import logging
import mock
import testtools
from shakenfist.daemons import external_api
# Configure the root logger once at import time so test output is verbose.
logging.basicConfig(level=logging.DEBUG)
class FakeResponse(object):
    """Minimal stand-in for a requests.Response: status code plus body text."""

    def __init__(self, status_code, text):
        self.text = text
        self.status_code = status_code
class ExternalApiTestCase(testtools.TestCase):
    """Exercises the external REST API daemon through Flask's test client."""

    def setUp(self):
        super(ExternalApiTestCase, self).setUp()
        # Stub out event recording so no database is required.
        self.add_event = mock.patch(
            'shakenfist.db.add_event')
        self.mock_add_event = self.add_event.start()
        external_api.TESTING = True
        external_api.app.testing = True
        external_api.app.debug = False
        self.client = external_api.app.test_client()

    def test_get_root(self):
        """The root endpoint identifies the service as plain text."""
        response = self.client.get('/')
        self.assertEqual('Shaken Fist REST API service',
                         response.get_data().decode('utf-8'))
        self.assertEqual(200, response.status_code)
        self.assertEqual('text/plain; charset=utf-8', response.content_type)

    @mock.patch('shakenfist.db.get_instance', return_value=None)
    def test_get_instance_not_found(self, mock_get_instance):
        """Fetching an unknown instance yields a JSON 404 body."""
        response = self.client.get('/instances/foo')
        self.assertEqual({'error': 'instance not found', 'status': 404},
                         response.get_json())
        self.assertEqual(404, response.status_code)
        self.assertEqual('application/json', response.content_type)

    @mock.patch('shakenfist.db.get_instance',
                return_value={'uuid': '123',
                              'name': 'banana'})
    def test_get_instance(self, mock_get_instance):
        """A known instance is returned verbatim as JSON."""
        response = self.client.get('/instances/foo')
        self.assertEqual({'uuid': '123', 'name': 'banana'},
                         response.get_json())
        self.assertEqual(200, response.status_code)
        self.assertEqual('application/json', response.content_type)

    @mock.patch('shakenfist.db.get_instance',
                return_value={'uuid': '123',
                              'name': 'banana',
                              'node': 'notthisone',
                              'disk_spec': [{
                                  'base': 'cirros',
                                  'size': 8
                              }],
                              'block_devices': None})
    @mock.patch('shakenfist.config.parsed',
                return_value={'INCLUDE_TRACEBACKS': '1',
                              'NODE_NAME': 'thisone',
                              'STORAGE_PATH': '/a/b/c'})
    @mock.patch('requests.request',
                return_value=FakeResponse(200, '{"fakestuff": "here"}'))
    def test_delete_instance(self, mock_request, mock_get_config,
                             mock_get_instance):
        """Deleting an instance hosted on another node proxies the request."""
        response = self.client.delete('/instances/foo')
        mock_request.assert_called_with(
            'DELETE', 'http://notthisone:1/instances/foo', data='{}')
        self.assertEqual({'fakestuff': 'here'}, response.get_json())
        self.assertEqual(200, response.status_code)
import json
import logging
import mock
import testtools
from shakenfist.daemons import external_api
# Configure the root logger once at import time so test output is verbose.
logging.basicConfig(level=logging.DEBUG)
class FakeResponse(object):
    """Minimal stand-in for a requests.Response: status code plus body text."""

    def __init__(self, status_code, text):
        self.text = text
        self.status_code = status_code
class ExternalApiTestCase(testtools.TestCase):
    """Exercises the external REST API daemon through Flask's test client."""

    def setUp(self):
        super(ExternalApiTestCase, self).setUp()
        # Stub out event recording so no database is required.
        self.add_event = mock.patch(
            'shakenfist.db.add_event')
        self.mock_add_event = self.add_event.start()
        external_api.TESTING = True
        external_api.app.testing = True
        external_api.app.debug = False
        self.client = external_api.app.test_client()

    def test_get_root(self):
        """The root endpoint identifies the service as plain text."""
        response = self.client.get('/')
        self.assertEqual('Shaken Fist REST API service',
                         response.get_data().decode('utf-8'))
        self.assertEqual(200, response.status_code)
        self.assertEqual('text/plain; charset=utf-8', response.content_type)

    @mock.patch('shakenfist.db.get_instance', return_value=None)
    def test_get_instance_not_found(self, mock_get_instance):
        """Fetching an unknown instance yields a JSON 404 body."""
        response = self.client.get('/instances/foo')
        self.assertEqual({'error': 'instance not found', 'status': 404},
                         response.get_json())
        self.assertEqual(404, response.status_code)
        self.assertEqual('application/json', response.content_type)

    @mock.patch('shakenfist.db.get_instance',
                return_value={'uuid': '123',
                              'name': 'banana'})
    def test_get_instance(self, mock_get_instance):
        """A known instance is returned verbatim as JSON."""
        response = self.client.get('/instances/foo')
        self.assertEqual({'uuid': '123', 'name': 'banana'},
                         response.get_json())
        self.assertEqual(200, response.status_code)
        self.assertEqual('application/json', response.content_type)

    @mock.patch('shakenfist.db.get_instance',
                return_value={'uuid': '123',
                              'name': 'banana',
                              'node': 'notthisone',
                              'disk_spec': [{
                                  'base': 'cirros',
                                  'size': 8
                              }],
                              'block_devices': None})
    @mock.patch('shakenfist.config.parsed',
                return_value={'INCLUDE_TRACEBACKS': '1',
                              'NODE_NAME': 'thisone',
                              'STORAGE_PATH': '/a/b/c'})
    @mock.patch('requests.request',
                return_value=FakeResponse(200, '{"fakestuff": "here"}'))
    def test_delete_instance(self, mock_request, mock_get_config,
                             mock_get_instance):
        """Deleting an instance hosted on another node proxies the request."""
        response = self.client.delete('/instances/foo')
        mock_request.assert_called_with(
            'DELETE', 'http://notthisone:1/instances/foo', data='{}')
        self.assertEqual({'fakestuff': 'here'}, response.get_json())
        self.assertEqual(200, response.status_code)
from ui_styles import Style
from Social_GUI import *
# GLOBALS USED IN UI_FUNCTIONS
GLOBAL_STATE = 0          # 0 = windowed, 1 = maximized (toggled by maximize_restore)
GLOBAL_TITLE_BAR = True   # when True the window is frameless with a custom title bar
# Placeholder label for newly created menu buttons; setText() overrides it.
count = 1
#LINKS GUI TOGETHER
class MainWindow(QMainWindow):
    """Top-level frameless application window.

    Builds the generated Ui_MainWindow, wires the collapsible side menu and
    delegates window-chrome behaviour (drag, maximize, shadow) to UIFunctions.
    """
    def __init__(self):
        # Build the Qt Designer UI and set the custom title-bar text.
        QMainWindow.__init__(self)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.setWindowTitle('Main Window')
        UIFunctions.labelTitle(self, "Social Distance Detector")
        # Initial window size doubles as the minimum size.
        startSize = QSize(850, 800)
        self.resize(startSize)
        self.setMinimumSize(startSize)
        # Collapse/expand the left menu between 70px and 220px.
        self.ui.btn_toggle_menu.clicked.connect(lambda: UIFunctions.toggleMenu(self, 220, True))
        # Create the menu buttons (top group and bottom group).
        self.ui.stackedWidget.setMinimumWidth(20)
        UIFunctions.addNewMenu(self, "HOME", "btn_home", "url(:/16x16/icons/16x16/cil-home.png)", True)
        UIFunctions.addNewMenu(self, "Advance Settings", "btn_advance", "url(:/16x16/icons/16x16/cil-equalizer.png)", False)
        # Highlight the default entry and show the start page.
        UIFunctions.selectStandardMenu(self, "btn_home")
        self.ui.stackedWidget.setCurrentWidget(self.ui.page_home)
        # Drag handler for the custom title bar.
        def moveWindow(event):
            # Dragging a maximized window first restores it.
            if UIFunctions.returStatus() == 1:
                UIFunctions.maximize_restore(self)
            # Left-button drag moves the frameless window.
            if event.buttons() == Qt.LeftButton:
                self.move(self.pos() + event.globalPos() - self.dragPos)
                self.dragPos = event.globalPos()
                event.accept()
        # Install window chrome (shadow, size grip, min/max/close buttons).
        UIFunctions.uiDefinitions(self)
        self.ui.frame_label_top_btns.mouseMoveEvent = moveWindow
        self.show()
    def Button(self):
        """Dispatch clicks from the dynamically created menu buttons."""
        btnWidget = self.sender()
        # Home page.
        if btnWidget.objectName() == "btn_home":
            self.ui.stackedWidget.setCurrentWidget(self.ui.page_home)
            UIFunctions.resetStyle(self, "btn_home")
            UIFunctions.labelPage(self, "Home")
            btnWidget.setStyleSheet(UIFunctions.selectMenu(btnWidget.styleSheet()))
        # Advanced-settings page.
        if btnWidget.objectName() == "btn_advance":
            self.ui.stackedWidget.setCurrentWidget(self.ui.page_widgets)
            UIFunctions.resetStyle(self, "btn_advance")
            UIFunctions.labelPage(self, "Advance Settings")
            btnWidget.setStyleSheet(UIFunctions.selectMenu(btnWidget.styleSheet()))
    def mousePressEvent(self, event):
        """Remember the press position so moveWindow can compute drag deltas."""
        self.dragPos = event.globalPos()
    def resizeEvent(self, event):
        """Default resize handling (hook kept for future custom behaviour)."""
        return super(MainWindow, self).resizeEvent(event)
class UIFunctions(MainWindow):
    """Window-chrome and menu helpers, called as UIFunctions.method(self, ...).

    NOTE(review): several methods take no `self` (returStatus, setStatus,
    selectMenu, deselectMenu) and the class is never instantiated on its own;
    the MainWindow base class looks unnecessary -- confirm before refactoring.
    """
    # Class-level copies; the methods below actually read/write the
    # module-level GLOBAL_STATE / GLOBAL_TITLE_BAR via `global`.
    GLOBAL_STATE = 0
    GLOBAL_TITLE_BAR = True
    def maximize_restore(self):
        """Toggle the window between maximized and normal states."""
        global GLOBAL_STATE
        status = GLOBAL_STATE
        if status == 0:
            self.showMaximized()
            GLOBAL_STATE = 1
            self.ui.horizontalLayout.setContentsMargins(0, 0, 0, 0)
            self.ui.btn_maximize_restore.setToolTip("Restore")
            self.ui.btn_maximize_restore.setIcon(QtGui.QIcon(u":/16x16/icons/16x16/cil-window-restore.png"))
            self.ui.frame_top_btns.setStyleSheet("background-color: rgb(27, 29, 35)")
            self.ui.frame_size_grip.hide()
        else:
            GLOBAL_STATE = 0
            self.showNormal()
            # 1px nudge -- presumably forces a geometry refresh after restore; TODO confirm.
            self.resize(self.width()+1, self.height()+1)
            self.ui.horizontalLayout.setContentsMargins(10, 10, 10, 10)
            self.ui.btn_maximize_restore.setToolTip("Maximize")
            self.ui.btn_maximize_restore.setIcon(QtGui.QIcon(u":/16x16/icons/16x16/cil-window-maximize.png"))
            self.ui.frame_top_btns.setStyleSheet("background-color: rgba(27, 29, 35, 200)")
            self.ui.frame_size_grip.show()
    # NOTE(review): name is missing an 'n' (returnStatus); callers depend on it.
    def returStatus():
        """Return the current maximize state (0 normal, 1 maximized)."""
        return GLOBAL_STATE
    def setStatus(status):
        """Overwrite the module-level maximize state."""
        global GLOBAL_STATE
        GLOBAL_STATE = status
    def enableMaximumSize(self, width, height):
        """Pin the window to a maximum size and hide resize affordances."""
        if width != '' and height != '':
            self.setMaximumSize(QSize(width, height))
            self.ui.frame_size_grip.hide()
            self.ui.btn_maximize_restore.hide()
    def toggleMenu(self, maxWidth, enable):
        """Animate the left menu between its 70px collapsed width and maxWidth."""
        if enable:
            width = self.ui.frame_left_menu.width()
            maxExtend = maxWidth
            standard = 70
            # Expand when collapsed; collapse otherwise.
            if width == 70:
                widthExtended = maxExtend
            else:
                widthExtended = standard
            # Stored on self so the animation object is not garbage-collected.
            self.animation = QPropertyAnimation(self.ui.frame_left_menu, b"minimumWidth")
            self.animation.setDuration(300)
            self.animation.setStartValue(width)
            self.animation.setEndValue(widthExtended)
            self.animation.setEasingCurve(QtCore.QEasingCurve.InOutQuart)
            self.animation.start()
    def labelTitle(self, text):
        """Set the custom title-bar text."""
        self.ui.label_title_bar_top.setText(text)
    def addNewMenu(self, name, objName, icon, isTopMenu):
        """Create a styled menu button wired to MainWindow.Button.

        name: visible label and tooltip. objName: Qt object name used for
        dispatch. icon: a stylesheet url(...) string substituted into the
        standard button style. isTopMenu: pick the top or bottom group.
        """
        font = QFont()
        font.setFamily(u"Segoe UI")
        # Initial text is the module-level `count`; setText(name) below replaces it.
        button = QPushButton(str(count),self)
        button.setObjectName(objName)
        sizePolicy3 = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        sizePolicy3.setHorizontalStretch(0)
        sizePolicy3.setVerticalStretch(0)
        sizePolicy3.setHeightForWidth(button.sizePolicy().hasHeightForWidth())
        button.setSizePolicy(sizePolicy3)
        button.setMinimumSize(QSize(0, 70))
        button.setLayoutDirection(Qt.LeftToRight)
        button.setFont(font)
        button.setStyleSheet(Style.style_bt_standard.replace('ICON_REPLACE', icon))
        button.setText(name)
        button.setToolTip(name)
        button.clicked.connect(self.Button)
        if isTopMenu:
            self.ui.layout_menus.addWidget(button)
        else:
            self.ui.layout_menu_bottom.addWidget(button)
    def selectMenu(getStyle):
        """Return the stylesheet with the 'selected' right border appended."""
        select = getStyle + ("QPushButton { border-right: 7px solid rgb(44, 49, 60); }")
        return select
    def deselectMenu(getStyle):
        """Return the stylesheet with the 'selected' right border removed."""
        deselect = getStyle.replace("QPushButton { border-right: 7px solid rgb(44, 49, 60); }", "")
        return deselect
    def selectStandardMenu(self, widget):
        """Mark the button whose object name equals `widget` as selected."""
        for w in self.ui.frame_left_menu.findChildren(QPushButton):
            if w.objectName() == widget:
                w.setStyleSheet(UIFunctions.selectMenu(w.styleSheet()))
    def resetStyle(self, widget):
        """Deselect every menu button except the one named `widget`."""
        for w in self.ui.frame_left_menu.findChildren(QPushButton):
            if w.objectName() != widget:
                w.setStyleSheet(UIFunctions.deselectMenu(w.styleSheet()))
    def labelPage(self, text):
        """Show '| TEXT' (uppercased) in the top-info label."""
        newText = '| ' + text.upper()
        self.ui.label_top_info_2.setText(newText)
    def userIcon(self, initialsTooltip, icon, showHide):
        """Configure the user avatar: initials text, optional icon, or hidden."""
        if showHide:
            self.ui.label_user_icon.setText(initialsTooltip)
            # With an icon, clear the initials and keep them as tooltip only.
            if icon:
                style = self.ui.label_user_icon.styleSheet()
                setIcon = "QLabel { background-image: " + icon + "; }"
                self.ui.label_user_icon.setStyleSheet(style + setIcon)
                self.ui.label_user_icon.setText('')
                self.ui.label_user_icon.setToolTip(initialsTooltip)
        else:
            self.ui.label_user_icon.hide()
    def uiDefinitions(self):
        """Install frameless-window chrome: double-click maximize, drop
        shadow, size grip, and the minimize/maximize/close buttons."""
        def doubleClickMaximizeRestore(event):
            # Double-clicking the title bar toggles maximize after 250ms.
            if event.type() == QtCore.QEvent.MouseButtonDblClick:
                QtCore.QTimer.singleShot(250, lambda: UIFunctions.maximize_restore(self))
        # Frameless window with the custom title bar handling double clicks.
        if GLOBAL_TITLE_BAR:
            self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
            self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
            self.ui.frame_label_top_btns.mouseDoubleClickEvent = doubleClickMaximizeRestore
        # Drop shadow around the main frame.
        self.shadow = QGraphicsDropShadowEffect(self)
        self.shadow.setBlurRadius(17)
        self.shadow.setXOffset(0)
        self.shadow.setYOffset(0)
        self.shadow.setColor(QColor(0, 0, 0, 150))
        self.ui.frame_main.setGraphicsEffect(self.shadow)
        # Resize grip. NOTE(review): "margin 0px" is missing a colon in this
        # stylesheet string; left untouched here since it is runtime behaviour.
        self.sizegrip = QSizeGrip(self.ui.frame_size_grip)
        self.sizegrip.setStyleSheet("width: 20px; height: 20px; margin 0px; padding: 0px;")
        # Window-control buttons.
        self.ui.btn_minimize.clicked.connect(lambda: self.showMinimized())
        self.ui.btn_maximize_restore.clicked.connect(lambda: UIFunctions.maximize_restore(self))
        self.ui.btn_close.clicked.connect(lambda: self.close())
from Social_GUI import *
# GLOBALS USED IN UI_FUNCTIONS
GLOBAL_STATE = 0          # 0 = windowed, 1 = maximized (toggled by maximize_restore)
GLOBAL_TITLE_BAR = True   # when True the window is frameless with a custom title bar
# Placeholder label for newly created menu buttons; setText() overrides it.
count = 1
#LINKS GUI TOGETHER
class MainWindow(QMainWindow):
    """Top-level frameless application window.

    Builds the generated Ui_MainWindow, wires the collapsible side menu and
    delegates window-chrome behaviour (drag, maximize, shadow) to UIFunctions.
    """
    def __init__(self):
        # Build the Qt Designer UI and set the custom title-bar text.
        QMainWindow.__init__(self)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.setWindowTitle('Main Window')
        UIFunctions.labelTitle(self, "Social Distance Detector")
        # Initial window size doubles as the minimum size.
        startSize = QSize(850, 800)
        self.resize(startSize)
        self.setMinimumSize(startSize)
        # Collapse/expand the left menu between 70px and 220px.
        self.ui.btn_toggle_menu.clicked.connect(lambda: UIFunctions.toggleMenu(self, 220, True))
        # Create the menu buttons (top group and bottom group).
        self.ui.stackedWidget.setMinimumWidth(20)
        UIFunctions.addNewMenu(self, "HOME", "btn_home", "url(:/16x16/icons/16x16/cil-home.png)", True)
        UIFunctions.addNewMenu(self, "Advance Settings", "btn_advance", "url(:/16x16/icons/16x16/cil-equalizer.png)", False)
        # Highlight the default entry and show the start page.
        UIFunctions.selectStandardMenu(self, "btn_home")
        self.ui.stackedWidget.setCurrentWidget(self.ui.page_home)
        # Drag handler for the custom title bar.
        def moveWindow(event):
            # Dragging a maximized window first restores it.
            if UIFunctions.returStatus() == 1:
                UIFunctions.maximize_restore(self)
            # Left-button drag moves the frameless window.
            if event.buttons() == Qt.LeftButton:
                self.move(self.pos() + event.globalPos() - self.dragPos)
                self.dragPos = event.globalPos()
                event.accept()
        # Install window chrome (shadow, size grip, min/max/close buttons).
        UIFunctions.uiDefinitions(self)
        self.ui.frame_label_top_btns.mouseMoveEvent = moveWindow
        self.show()
    def Button(self):
        """Dispatch clicks from the dynamically created menu buttons."""
        btnWidget = self.sender()
        # Home page.
        if btnWidget.objectName() == "btn_home":
            self.ui.stackedWidget.setCurrentWidget(self.ui.page_home)
            UIFunctions.resetStyle(self, "btn_home")
            UIFunctions.labelPage(self, "Home")
            btnWidget.setStyleSheet(UIFunctions.selectMenu(btnWidget.styleSheet()))
        # Advanced-settings page.
        if btnWidget.objectName() == "btn_advance":
            self.ui.stackedWidget.setCurrentWidget(self.ui.page_widgets)
            UIFunctions.resetStyle(self, "btn_advance")
            UIFunctions.labelPage(self, "Advance Settings")
            btnWidget.setStyleSheet(UIFunctions.selectMenu(btnWidget.styleSheet()))
    def mousePressEvent(self, event):
        """Remember the press position so moveWindow can compute drag deltas."""
        self.dragPos = event.globalPos()
    def resizeEvent(self, event):
        """Default resize handling (hook kept for future custom behaviour)."""
        return super(MainWindow, self).resizeEvent(event)
class UIFunctions(MainWindow):
    """UI helpers for MainWindow: window chrome (frameless title bar,
    maximize/restore, drop shadow, size grip) and dynamic left-menu handling.

    Several methods (returStatus, setStatus, selectMenu, deselectMenu) take
    no ``self`` and are invoked unbound as ``UIFunctions.<name>(...)``.
    """

    # GLOBALS
    # NOTE(review): maximize_restore/setStatus declare `global GLOBAL_STATE`,
    # so they read/write a MODULE-level variable, not these class attributes.
    # Confirm a module-level GLOBAL_STATE exists, otherwise the first read in
    # maximize_restore raises NameError.
    GLOBAL_STATE = 0
    GLOBAL_TITLE_BAR = True

    # MAXIMIZE/RESTORE
    def maximize_restore(self):
        """Toggle between maximized and normal window state, updating the
        button icon/tooltip, layout margins and the size grip."""
        global GLOBAL_STATE
        status = GLOBAL_STATE
        if status == 0:
            self.showMaximized()
            GLOBAL_STATE = 1
            # Maximized: no outer margins, opaque top bar, no size grip.
            self.ui.horizontalLayout.setContentsMargins(0, 0, 0, 0)
            self.ui.btn_maximize_restore.setToolTip("Restore")
            self.ui.btn_maximize_restore.setIcon(QtGui.QIcon(u":/16x16/icons/16x16/cil-window-restore.png"))
            self.ui.frame_top_btns.setStyleSheet("background-color: rgb(27, 29, 35)")
            self.ui.frame_size_grip.hide()
        else:
            GLOBAL_STATE = 0
            self.showNormal()
            # The +1/+1 resize nudges Qt into relaying out after showNormal().
            self.resize(self.width()+1, self.height()+1)
            self.ui.horizontalLayout.setContentsMargins(10, 10, 10, 10)
            self.ui.btn_maximize_restore.setToolTip("Maximize")
            self.ui.btn_maximize_restore.setIcon(QtGui.QIcon(u":/16x16/icons/16x16/cil-window-maximize.png"))
            self.ui.frame_top_btns.setStyleSheet("background-color: rgba(27, 29, 35, 200)")
            self.ui.frame_size_grip.show()

    # RETURN STATUS
    def returStatus():
        # NOTE(review): name looks like a typo for "returnStatus", but it is
        # part of the public interface -- renaming would break callers.
        return GLOBAL_STATE

    # SET STATUS
    def setStatus(status):
        # Overwrite the module-level maximize/restore state flag.
        global GLOBAL_STATE
        GLOBAL_STATE = status

    # ENABLE MAXIMUM SIZE
    def enableMaximumSize(self, width, height):
        """Clamp the window to (width, height) and hide resize affordances.
        Both arguments must be non-empty strings/values for the clamp to apply."""
        if width != '' and height != '':
            self.setMaximumSize(QSize(width, height))
            self.ui.frame_size_grip.hide()
            self.ui.btn_maximize_restore.hide()

    # TOGGLE MENU
    def toggleMenu(self, maxWidth, enable):
        """Animate the left menu between its 70px collapsed width and maxWidth."""
        if enable:
            # GET WIDTH
            width = self.ui.frame_left_menu.width()
            maxExtend = maxWidth
            standard = 70
            # SET MAX WIDTH: expand when currently collapsed, else collapse.
            if width == 70:
                widthExtended = maxExtend
            else:
                widthExtended = standard
            # ANIMATION (stored on self so it is not garbage-collected mid-run)
            self.animation = QPropertyAnimation(self.ui.frame_left_menu, b"minimumWidth")
            self.animation.setDuration(300)
            self.animation.setStartValue(width)
            self.animation.setEndValue(widthExtended)
            self.animation.setEasingCurve(QtCore.QEasingCurve.InOutQuart)
            self.animation.start()

    # LABEL TITLE
    def labelTitle(self, text):
        # Title text shown in the custom (frameless) title bar.
        self.ui.label_title_bar_top.setText(text)

    # DYNAMIC MENUS
    def addNewMenu(self, name, objName, icon, isTopMenu):
        """Create a left-menu button wired to self.Button and append it to
        the top or bottom menu layout."""
        font = QFont()
        font.setFamily(u"Segoe UI")
        # NOTE(review): `count` is not defined in this scope -- this raises
        # NameError unless a module-level `count` exists.  The text is
        # overwritten by setText(name) below anyway; confirm and simplify.
        button = QPushButton(str(count),self)
        button.setObjectName(objName)
        sizePolicy3 = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        sizePolicy3.setHorizontalStretch(0)
        sizePolicy3.setVerticalStretch(0)
        sizePolicy3.setHeightForWidth(button.sizePolicy().hasHeightForWidth())
        button.setSizePolicy(sizePolicy3)
        button.setMinimumSize(QSize(0, 70))
        button.setLayoutDirection(Qt.LeftToRight)
        button.setFont(font)
        # Inject this entry's icon path into the shared stylesheet template.
        button.setStyleSheet(Style.style_bt_standard.replace('ICON_REPLACE', icon))
        button.setText(name)
        button.setToolTip(name)
        button.clicked.connect(self.Button)
        if isTopMenu:
            self.ui.layout_menus.addWidget(button)
        else:
            self.ui.layout_menu_bottom.addWidget(button)

    # SELECT MENU
    def selectMenu(getStyle):
        # Append the "selected" right-border marker to a button stylesheet.
        select = getStyle + ("QPushButton { border-right: 7px solid rgb(44, 49, 60); }")
        return select

    # DESELECT MENU
    def deselectMenu(getStyle):
        # Strip the exact marker string appended by selectMenu.
        deselect = getStyle.replace("QPushButton { border-right: 7px solid rgb(44, 49, 60); }", "")
        return deselect

    # START SELECTION
    def selectStandardMenu(self, widget):
        """Mark the menu button whose objectName equals *widget* as selected."""
        for w in self.ui.frame_left_menu.findChildren(QPushButton):
            if w.objectName() == widget:
                w.setStyleSheet(UIFunctions.selectMenu(w.styleSheet()))

    # RESET SELECTION
    def resetStyle(self, widget):
        """Deselect every menu button except the one named *widget*."""
        for w in self.ui.frame_left_menu.findChildren(QPushButton):
            if w.objectName() != widget:
                w.setStyleSheet(UIFunctions.deselectMenu(w.styleSheet()))

    # CHANGE PAGE LABEL TEXT
    def labelPage(self, text):
        # Breadcrumb-style page indicator, e.g. "| HOME".
        newText = '| ' + text.upper()
        self.ui.label_top_info_2.setText(newText)

    # USER ICON
    def userIcon(self, initialsTooltip, icon, showHide):
        """Show the user badge with initials or a background-image icon, or
        hide it entirely when *showHide* is falsy."""
        if showHide:
            # SET TEXT
            self.ui.label_user_icon.setText(initialsTooltip)
            # SET ICON: a background image replaces the initials text.
            if icon:
                style = self.ui.label_user_icon.styleSheet()
                setIcon = "QLabel { background-image: " + icon + "; }"
                self.ui.label_user_icon.setStyleSheet(style + setIcon)
                self.ui.label_user_icon.setText('')
            self.ui.label_user_icon.setToolTip(initialsTooltip)
        else:
            self.ui.label_user_icon.hide()

    def uiDefinitions(self):
        """One-time window-chrome setup: frameless flags, double-click
        maximize, drop shadow, size grip and title-bar button wiring."""
        # DOUBLE CLICK MAXIMIZES WINDOW
        def doubleClickMaximizeRestore(event):
            # IF DOUBLE CLICK CHANGE STATUS (the short delay keeps the toggle
            # from fighting the press/drag handlers).
            if event.type() == QtCore.QEvent.MouseButtonDblClick:
                QtCore.QTimer.singleShot(250, lambda: UIFunctions.maximize_restore(self))
        # MAXIMIZE WINDOW ON TITLE BAR WHEN DOUBLE CLICK
        # NOTE(review): bare GLOBAL_TITLE_BAR resolves to a module global, not
        # the class attribute above -- confirm one is defined at module level.
        if GLOBAL_TITLE_BAR:
            self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
            self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
            self.ui.frame_label_top_btns.mouseDoubleClickEvent = doubleClickMaximizeRestore
        # SHADOW around the translucent frameless window
        self.shadow = QGraphicsDropShadowEffect(self)
        self.shadow.setBlurRadius(17)
        self.shadow.setXOffset(0)
        self.shadow.setYOffset(0)
        self.shadow.setColor(QColor(0, 0, 0, 150))
        self.ui.frame_main.setGraphicsEffect(self.shadow)
        # RESIZE WINDOW grip in the bottom corner
        self.sizegrip = QSizeGrip(self.ui.frame_size_grip)
        # NOTE(review): "margin 0px" is missing a colon in this CSS string.
        self.sizegrip.setStyleSheet("width: 20px; height: 20px; margin 0px; padding: 0px;")
        # MINIMIZE
        self.ui.btn_minimize.clicked.connect(lambda: self.showMinimized())
        # MAXIMIZE/RESTORE
        self.ui.btn_maximize_restore.clicked.connect(lambda: UIFunctions.maximize_restore(self))
        # CLOSE APPLICATION
        self.ui.btn_close.clicked.connect(lambda: self.close())
import numpy as np
import sys
class Softmax:
    """Softmax output node.

    ``forward``/``forward_vec`` return only the softmax probability of the
    ground-truth class; ``backward``/``backward_vec`` turn the upstream
    gradient on that probability into a gradient on every raw score.  The
    exponentiated scores are cached in ``self.softmax_result`` between passes.
    """

    def forward_vec(self, scores, win_idx):
        """Forward pass for a single sample.

        - scores: ndarray(p,). Vector of class scores.
        - win_idx: int. Index of the ground-truth class.
        Returns the softmax probability of the ground-truth class (scalar).
        """
        self.scores = scores
        self.win_idx = win_idx
        # BUGFIX: shift by the max before exponentiating so large scores
        # cannot overflow (forward() already did this).  The softmax ratio --
        # and the backward formulas, which only use ratios of h -- is
        # invariant under this shift.
        h = np.exp(scores - np.max(scores))
        self.softmax_result = h
        return h[win_idx] / np.sum(h)

    def forward(self, scores, win_idx):
        """Forward pass for a batch.

        - scores: ndarray(p, m). One column of class scores per sample.
        - win_idx: ndarray(m,). Ground-truth class index per column.
        Returns ndarray(m,) of ground-truth probabilities.
        """
        self.scores = scores
        self.win_idx = win_idx
        # Max-shift for numerical stability.
        h = np.exp(scores - np.max(scores))
        self.softmax_result = h
        col_idx = np.arange(scores.shape[1])
        return h[win_idx, col_idx] / np.sum(h, axis=0)

    def backward_vec(self, up_grad):
        """Backward pass for a single sample.

        - up_grad: scalar upstream gradient w.r.t. the returned probability.
        Returns ndarray(p,): gradient w.r.t. every score.

        With S = sum(h) and w = win_idx the jacobian of p_w = h_w / S is
            d p_w / d s_j = -h_w * h_j / S**2        (j != w)
            d p_w / d s_w =  h_w * (S - h_w) / S**2
        """
        h = self.softmax_result
        w = self.win_idx
        S = np.sum(h)
        loc_grad = h * (-h[w] / S ** 2)
        loc_grad[w] = h[w] * (S - h[w]) / S ** 2
        return up_grad * loc_grad

    def backward(self, up_grad):
        """Backward pass for a batch.

        - up_grad: ndarray(m,). Upstream gradient per column.
        Returns ndarray(p, m): gradient w.r.t. every score.
        """
        h = self.softmax_result
        col_idx = np.arange(h.shape[1])
        h_sum = np.sum(h, axis=0)
        h_win = h[self.win_idx, col_idx]
        loc_grad = np.array(h * (-h_win / h_sum ** 2), dtype=np.double)
        # BUGFIX: the original assigned to loc_grad[self.win_idx], which
        # fancy-indexes whole ROWS (one per column) and so corrupts every
        # entry of those rows when m > 1.  Index both axes to touch only the
        # single (win_idx[c], c) diagonal entry per column.
        loc_grad[self.win_idx, col_idx] = h_win * (h_sum - h_win) / h_sum ** 2
        return loc_grad * up_grad
import numpy as np
import sys
class Softmax:
    """Softmax node: the forward pass returns the probability assigned to the
    ground-truth class, the backward pass propagates the upstream gradient
    back onto the raw scores.  Exponentiated scores are cached in
    ``self.softmax_result`` for the backward pass."""

    def forward_vec(self, scores, win_idx):
        """Single-sample forward pass.

        - scores: ndarray(p,). Vector of class scores.
        - win_idx: int. Index of the ground truth.
        """
        # Cache the inputs for backprop.
        self.scores = scores
        self.win_idx = win_idx
        exp_scores = np.exp(scores)
        self.softmax_result = exp_scores
        return exp_scores[win_idx] / np.sum(exp_scores)

    def forward(self, scores, win_idx):
        """Batched forward pass.

        - scores: ndarray(p, m). Matrix with class scores as columns.
        - win_idx: ndarray(m,). Vector with the ground truth per column.
        """
        # Cache the inputs for backprop.
        self.scores = scores
        self.win_idx = win_idx
        # Max-shifted exponentials for numerical stability.
        exp_scores = np.exp(scores - np.max(scores))
        self.softmax_result = exp_scores
        cols = np.arange(scores.shape[1])
        return exp_scores[win_idx, cols] / np.sum(exp_scores, axis=0)

    def backward_vec(self, up_grad):
        """Single-sample backward pass.

        - up_grad: scalar upstream gradient.
        """
        exp_scores = self.softmax_result
        total = np.sum(exp_scores)
        winner = exp_scores[self.win_idx]
        # Off-diagonal jacobian factor: -h_w / S**2 (applies to j != win_idx).
        off_diag = (-winner) / total ** 2
        grad = exp_scores * off_diag
        # Winning entry: rescale to h_w * (S - h_w) / S**2.
        grad[self.win_idx] = grad[self.win_idx] / off_diag * ((total - winner) / total ** 2)
        return up_grad * grad

    def backward(self, up_grad):
        """Batched backward pass.

        - up_grad: ndarray(m,). Upstream gradient per column.
        """
        exp_scores = self.softmax_result
        cols = np.arange(exp_scores.shape[1])
        totals = np.sum(exp_scores, axis=0)
        winners = exp_scores[self.win_idx, cols]
        # Off-diagonal jacobian factor per column.
        off_diag = (-winners) / totals ** 2
        grad = np.array(exp_scores * off_diag, dtype=np.double)
        # Rescale the ground-truth entries (row-indexed, as before).
        grad[self.win_idx] = grad[self.win_idx] / off_diag * ((totals - winners) / totals ** 2)
        return grad * up_grad
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
import time
import re
import redis
def _scrape_listed_jobs(driver, detail_driver, r, key, data):
    """Read every job row on the current result page, load each job's detail
    page with *detail_driver*, store the record in the 'liepin' redis hash and
    append it to *data*.  (Extracted: this code was duplicated verbatim for
    the first page and for every subsequent page.)"""
    rows = driver.find_elements_by_xpath('//*[@id="sojob"]/div[2]/div/div[1]/div[1]/ul/li')
    for row in rows:
        name = row.find_element_by_xpath('./div/div/h3/a')
        url = name.get_attribute('href')
        company = row.find_element_by_xpath('./div/div[2]/p[1]/a')
        salary = row.find_element_by_xpath('./div/div[1]/p[1]/span[1]')
        e = {'type': 'liepin', 'key': key, 'name': name.text, 'url': url,
             'company': company.text, 'salary': salary.text, 'detail': '', 'address': ''}
        detail_driver.get(url)
        detail_driver.implicitly_wait(2)
        try:
            # Detail/address blocks are missing on some postings; keep going.
            e['detail'] = detail_driver.find_element_by_xpath('//*[@id="job-view-enterprise"]/div/div/div[1]/div[1]/div[3]/div').text
            e['address'] = detail_driver.find_element_by_xpath('//*[@id="job-view-enterprise"]/div/div/div[2]/div[2]/div/div[1]/div/ul[1]/li[3]').text
        except Exception as err:
            print(err)
        r.hset('liepin', e['company'] + ' ' + e['name'], str(e))
        data.append(e)


def _select_city(driver, address):
    """Pick *address* in the city chooser; 佛山 needs a two-step selection via
    the province 广东."""
    selector = driver.find_element_by_xpath('//*[@id="sojob"]/div[1]/form/div[1]/div/div/ul/li[1]/span/em')
    selector.click()
    cityPanel1 = driver.find_element_by_xpath('//*[@id="sojob"]/div[10]/div[2]')
    for c in cityPanel1.find_elements_by_tag_name('a'):
        if c.text == address:
            c.click()
            driver.find_element_by_xpath('//*[@id="sojob"]/div[10]/div[3]/a[1]').click()
            break
        if c.text == '广东' and address == '佛山':
            c.click()
            cityPanel2 = driver.find_element_by_xpath('//*[@id="sojob"]/div[10]/div[2]')
            for c2 in cityPanel2.find_elements_by_tag_name('a'):
                if c2.text == '佛山':
                    c2.click()
                    driver.find_element_by_xpath('//*[@id="sojob"]/div[10]/div[3]/a[1]').click()
                    break
            break


def main(key, address):
    """Crawl liepin.com job listings for search term *key* in city *address*.

    Drives two Chrome instances (one keeps the paginated result list, the
    other loads detail pages), stores every record in the redis hash
    'liepin' and returns the records as a list of dicts.
    """
    data = []
    driver = webdriver.Chrome("D:/Program Files/python/chromedriver.exe")
    driver2 = webdriver.Chrome("D:/Program Files/python/chromedriver.exe")
    pool = redis.ConnectionPool(host='127.0.0.1', password='')
    r = redis.Redis(connection_pool=pool)
    try:
        driver.get("https://www.liepin.com/zhaopin")
        driver.implicitly_wait(3)
        _select_city(driver, address)
        # Apply the two job-type filters.
        type1 = driver.find_element_by_xpath('//*[@id="sojob"]/div[1]/form/div[2]/div/div[1]/dl[6]/dd[2]/ul/li[3]/a')
        driver.execute_script("arguments[0].click();", type1)
        type2 = driver.find_element_by_xpath('//*[@id="sojob"]/div[1]/form/div[2]/div/div[1]/dl[6]/dd[1]/ul/li[4]/a')
        driver.execute_script("arguments[0].click();", type2)
        # Type the search keyword and submit with ENTER.
        keyInput = driver.find_element_by_xpath('//*[@id="sojob"]/div[1]/form/div[1]/div/div/div[1]/input')
        keyInput.send_keys(key)
        keyInput.send_keys(Keys.ENTER)
        # First result page, then follow 下一页 (next page) until it is
        # missing or disabled (non-empty class attribute).
        _scrape_listed_jobs(driver, driver2, r, key, data)
        retries = 0
        while True:
            elemNext = None
            try:
                pagebar = driver.find_element_by_xpath('//*[@id="sojob"]/div[2]/div/div[1]/div[1]/div/div')
                for a in pagebar.find_elements_by_tag_name('a'):
                    if a.text == '下一页':
                        elemNext = a
                        break
            except Exception as err:
                # BUGFIX: the original `continue` retried forever when the
                # page bar never appeared; cap the retries.
                print(err)
                retries += 1
                if retries > 3:
                    break
                continue
            retries = 0
            if elemNext is None or elemNext.get_attribute('class') != '':
                break
            driver.execute_script("arguments[0].click();", elemNext)
            time.sleep(2)
            _scrape_listed_jobs(driver, driver2, r, key, data)
    except Exception as err:
        print(err)
    finally:
        # BUGFIX: no `return` in here -- the original returned from finally,
        # which silently swallowed every in-flight exception (including
        # KeyboardInterrupt) and overrode the except-path return.
        driver.quit()
        driver2.quit()
        pool.disconnect()
    return data
# Run the sample crawl only when executed as a script, so importing this
# module does not launch browsers.  (Dataset-extraction garbage that was
# fused onto this line has been removed.)
if __name__ == '__main__':
    main('c#', '广州')
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
import time
import re
import redis
# Duplicate copy of the liepin.com crawler above.
def main(key,address):
    """Crawl liepin.com job listings for search term *key* in city *address*.

    Drives two Chrome instances (one for the paginated result list, one for
    detail pages), stores every record in the redis hash 'liepin' and
    returns the records as a list of dicts.
    NOTE(review): the `return data` inside `finally` at the bottom swallows
    every exception raised in the try block -- confirm this is intended.
    """
    data = []
    # Two browsers: `driver` keeps the result list, `driver2` loads each
    # job's detail page so the list page never navigates away.
    driver = webdriver.Chrome("D:/Program Files/python/chromedriver.exe")
    driver2 = webdriver.Chrome("D:/Program Files/python/chromedriver.exe")
    pool = redis.ConnectionPool(host='127.0.0.1',password='')
    r = redis.Redis(connection_pool=pool)
    try:
        driver.get("https://www.liepin.com/zhaopin")
        driver.implicitly_wait(3)
        # Open the city drop-down.
        selector = driver.find_element_by_xpath('//*[@id="sojob"]/div[1]/form/div[1]/div/div/ul/li[1]/span/em')
        selector.click()
        cityPanel1 = driver.find_element_by_xpath('//*[@id="sojob"]/div[10]/div[2]')
        # Pick the requested city; 佛山 needs a second step via province 广东.
        citys = cityPanel1.find_elements_by_tag_name('a')
        for c in citys:
            if c.text == address:
                c.click()
                driver.find_element_by_xpath('//*[@id="sojob"]/div[10]/div[3]/a[1]').click()
                break
            if c.text == '广东' and address == '佛山':
                c.click()
                cityPanel2 = driver.find_element_by_xpath('//*[@id="sojob"]/div[10]/div[2]')
                city2s = cityPanel2.find_elements_by_tag_name('a')
                for c2 in city2s:
                    if c2.text == '佛山':
                        c2.click()
                        driver.find_element_by_xpath('//*[@id="sojob"]/div[10]/div[3]/a[1]').click()
                        break
                break
        # Apply the two job-type filters via JS clicks (elements may be
        # outside the viewport).
        type1 = driver.find_element_by_xpath('//*[@id="sojob"]/div[1]/form/div[2]/div/div[1]/dl[6]/dd[2]/ul/li[3]/a')
        driver.execute_script("arguments[0].click();", type1)
        type2 = driver.find_element_by_xpath('//*[@id="sojob"]/div[1]/form/div[2]/div/div[1]/dl[6]/dd[1]/ul/li[4]/a')
        driver.execute_script("arguments[0].click();", type2)
        # Type the search keyword and submit with ENTER.
        keyInput = driver.find_element_by_xpath('//*[@id="sojob"]/div[1]/form/div[1]/div/div/div[1]/input')
        keyInput.send_keys(key)
        keyInput.send_keys(Keys.ENTER)
        # Scrape the first result page.
        elems = driver.find_elements_by_xpath('//*[@id="sojob"]/div[2]/div/div[1]/div[1]/ul/li')
        for i in elems:
            name = i.find_element_by_xpath('./div/div/h3/a')
            url = name.get_attribute('href')
            company = i.find_element_by_xpath('./div/div[2]/p[1]/a')
            salary = i.find_element_by_xpath('./div/div[1]/p[1]/span[1]')
            e = { 'type':'liepin' ,'key':key,'name':name.text,'url': url,'company':company.text,'salary':salary.text,'detail':'','address':''}
            driver2.get(url)
            driver2.implicitly_wait(2)
            try:
                # Detail/address nodes are missing on some postings; continue.
                e['detail'] = driver2.find_element_by_xpath('//*[@id="job-view-enterprise"]/div/div/div[1]/div[1]/div[3]/div').text
                e['address'] = driver2.find_element_by_xpath('//*[@id="job-view-enterprise"]/div/div/div[2]/div[2]/div/div[1]/div/ul[1]/li[3]').text
            except Exception as err:
                print(err)
            r.hset('liepin',e['company']+' '+e['name'],str(e))
            data.append(e)
        # Follow 下一页 (next page) until the link disappears or is disabled.
        while True:
            elemNext = None
            try:
                pagebar = driver.find_element_by_xpath('//*[@id="sojob"]/div[2]/div/div[1]/div[1]/div/div')
                pagebtns = pagebar.find_elements_by_tag_name('a')
                for i in pagebtns:
                    if i.text == '下一页':
                        elemNext = i
                        break
            except Exception as err:
                # NOTE(review): unconditional `continue` retries forever if
                # the page bar never appears -- consider a retry cap.
                print(err)
                continue
            # A non-empty class on the link marks it disabled (last page).
            if elemNext is None or elemNext.get_attribute('class') != '':
                break
            driver.execute_script("arguments[0].click();", elemNext)
            time.sleep(2)
            temp = driver.find_elements_by_xpath('//*[@id="sojob"]/div[2]/div/div[1]/div[1]/ul/li')
            for j in temp:
                name = j.find_element_by_xpath('./div/div/h3/a')
                url = name.get_attribute('href')
                company = j.find_element_by_xpath('./div/div[2]/p[1]/a')
                salary = j.find_element_by_xpath('./div/div[1]/p[1]/span[1]')
                e = { 'type':'liepin' ,'key':key,'name':name.text,'url': url,'company':company.text,'salary':salary.text,'detail':'','address':''}
                driver2.get(url)
                driver2.implicitly_wait(2)
                try:
                    e['detail'] = driver2.find_element_by_xpath('//*[@id="job-view-enterprise"]/div/div/div[1]/div[1]/div[3]/div').text
                    e['address'] = driver2.find_element_by_xpath('//*[@id="job-view-enterprise"]/div/div/div[2]/div[2]/div/div[1]/div/ul[1]/li[3]').text
                except Exception as err:
                    print(err)
                r.hset('liepin',e['company']+' '+e['name'],str(e))
                data.append(e)
    except Exception as err:
        print(err)
        return data
    finally:
        # Always shut the browsers and the redis pool down.
        driver.quit()
        driver2.quit()
        pool.disconnect()
        # NOTE(review): returning from finally overrides the returns above
        # and silences in-flight exceptions.
        return data
# NOTE(review): module-level call -- the crawl runs on import; an
# `if __name__ == '__main__':` guard would be safer.
main('c#','广州')
import pymongo, datetime
from bson.objectid import ObjectId
import ast
# Legacy Python 2 migration script: denormalizes the per-collection EID data
# into the `entries` collection consumed by the Meteor app.
#db = pymongo.Connection('localhost', 27017)['sicki3']
# NOTE(review): pymongo.Connection is the pre-3.0 API; port 3002 is
# presumably the local Meteor mongo -- verify before running elsewhere.
db = pymongo.Connection('localhost', 3002)['meteor']
# source collections
#economics = db.economics
event = db.event
#host = db.host
#location = db.location
#'metaData': metaData = db.'metaData': metaData
#metrics = db.metrics
#pathogen = db.pathogen
# update entries from the other collections
eid_events = event.find()
# Shared review metadata.  NOTE(review): this ONE dict is referenced (not
# copied) by every generated field in the loop below -- mutating it later
# mutates all of them.
meta = {
    'rank' : {
        'eha': True,
        'expert': False,
        'top': True,
        'auto': False
    },
    'votes': {
        'up': 0,
        'down': 0
    },
    'userId': 0,
    'reviewer': 'eha',
    'submitted': ''
}
print meta
# Processed-event counter (diagnostics only).
count = 0
#dates = ['startDate','startDateISO','refStartDate','endDate','endDateISO','refEndDate']
for eid in eid_events:
#print eid['eventName']
count += 1
# Load host
cursor = db.host.find ({'eventName': eid['eventName']})
for value in cursor:
eid_host = value
#print eid_host['hostTaxOrder']
# Load economics
cursor = db.economics.find ({'eventName': eid['eventName']})
for value in cursor:
eid_economics = value
#print eid_economics['avgAgeDeath']
# Load pathogens
cursor = db.pathogen.find ({'eventName': eid['eventName']})
for value in cursor:
eid_pathogen = value
#print eid_pathogen['pathogenClass']
# Load locations
cursor = db.location.find ({'eventName': eid['eventName']})
for value in cursor:
eid_location = value
#print eid_location['locationNation']
# Load SI matches
cursor = db.match_key_si.find ({'jonesEIDID': eid['eidID']})
# print type(db.match_key_si.findOne({}, {'jonesEIDID':1}))
for value in cursor:
eid_match = value
print eid_match['jonesEIDID']
id = eid.get('_id')
eid_body = {
# Meta Data
'meta': {
'rank' : {
'eha': True,
'expert': False,
'top': True,
'auto': False
},
'votes': {
'up': 0,
'down': 0
},
'userId': 0,
'reviewer': eid['reviewer'],
'submitted': ''
},
# Calculated fields
'commentsCount': 0, #Calculated field
'refsCount': 0, #Calculated field
# Relational fields
# populate refs when matched with the original jones refs
'refs': ast.literal_eval(eid_match['jonesSources']), #Calculated field
'comments' : [], #Calculated field
# Quoted value
'valQuote': {}, # original data from Jones et al SI
# Value object
'val' : {
'sickiID':{
'val':'',
'valQuote': eid['eidID'],
'ref': 'ehaID',
'meta': meta
},
'eventName':{
'val':'',
'valQuote': eid['eventName'],
'refBlob': eid['refEventName'],
'ref': [],
'meta': meta
},
'disease':{
'val': eid['diseaseVal'],
'valQuote': eid['disease'],
'icd10':eid['ICD10Val'],
'ref':[],
'refBlob': eid['refDisease'],
'meta': meta
},
'eid':{
'val':'',
'valQuote': eid['eid'],
'ref':[],
'refBlob': eid['refEID'],
'meta': meta
},
'eidCategory':{
'val':'',
'valQuote': eid['eidCategory'],
'ref':[],
'refBlob': eid['refEIDCategory'],
'meta': meta
},
'abstract': {
'val':'',
'valQuote': eid['Abstract'],
'ref':[],
'refBlob': eid['refAbstract'],
'meta': meta
},
'notes': {
'val':'',
'valQuote': eid['notes'],
'ref':[],
'refBlob': eid['refNotes'],
'meta': meta
},
'transmissionModel': {
'val':eid['transitionModelVal'],
'valQuote': eid['transitionModel'],
'ref':[],
'refBlob': eid['refTransitionModel'],
'meta': meta
},
'zoonoticType': {
'val':eid['zoonoticTypeVal'],
'valQuote': eid['zoonoticType'],
'ref':[],
'refBlob': eid['refZoonoticType'],
'meta': meta
},
'sampleType': {
'val':eid['sampleTypeVal'],
'valQuote': eid['sampleType'],
'ref':[],
'refBlob': eid['refSampleType'],
'meta': meta
},
'driver': {
'val':eid['driverVal'],
'valQuote': eid['driver'],
'ref':[],
'refBlob': eid['refDriver'],
'meta': meta
},
## Pathogens
'pathogens':{
'drugResistance': {
'val':'',
'valQuote': eid_pathogen['pathogenDrugResistance'],
'ref':[],
'refBlob': eid_pathogen['refPathogenDrugResistance'],
'meta': meta
},
'reportedName': {
'val':'',
'valQuote': eid_pathogen['pathogenReportedName'],
'ref':[],
'refBlob': eid_pathogen['refPathogenReportedName'],
'meta': meta
},
'class': {
'val':'',
'valQuote': eid_pathogen['pathogenClass'],
'ref':[],
'refBlob': eid_pathogen['refPathogenClass'],
'meta': meta
},
'family': {
'val':'',
'valQuote': eid_pathogen['pathogenFamily'],
'ref':[],
'refBlob': eid_pathogen['refPathogenFamily'],
'meta': meta
},
'species': {
'val':'',
'valQuote': eid_pathogen['pathogenSpecies'],
'ref':[],
'refBlob': eid_pathogen['refPathogenSpecies'],
'meta': meta
},
'authority': {
'val':'',
'valQuote': eid_pathogen['pathogenAuthority'],
'ref':[],
'refBlob': eid_pathogen['refPathogenAuthority'],
'meta': meta
},
'taxOrder': {
'val':'',
'valQuote': eid_pathogen['pathogenTaxOrder'],
'ref':[],
'refBlob': eid_pathogen['refPathogenTaxOrder'],
'meta': meta
},
'genus': {
'val':'',
'valQuote': eid_pathogen['pathogenGenus'],
'ref':[],
'refBlob': eid_pathogen['refPathogenGenus'],
'meta': meta
},
'subSpecies': {
'val':'',
'valQuote': eid_pathogen['pathogenSubSpecies'],
'ref':[],
'refBlob': eid_pathogen['refPathogenSubSpecies'],
'meta': meta
}
}, #pathogens
## Locations
'locations':{
'name': {
'val':'',
'valQuote': eid_location['locationLocationName'],
'ref':[],
'refBlob': eid_location['refLocationLocationName'],
'meta': meta
},
'placeName': {
'val':'',
'valQuote': eid_location['locationPlaceName'],
'ref':[],
'refBlob': eid_location['refLocationPlaceName'],
'meta': meta
},
'latitude': {
'val':'',
'valQuote': eid_location['locationLatitude'],
'ref':[],
'refBlob': eid_location['refLocationLatitude'],
'meta': meta
},
'longitude': {
'val':'',
'valQuote': eid_location['locationLongitude'],
'ref':[],
'refBlob': eid_location['refLocationLongitude'],
'meta': meta
},
'city': {
'val':'',
'valQuote': eid_location['locationCity'],
'ref':[],
'refBlob': eid_location['refLocationCity'],
'meta': meta
},
'subnationalRegion': {
'val':'',
'valQuote': eid_location['locationSubnationalRegion'],
'ref':[],
'refBlob': eid_location['refLocationSubnationalRegion'],
'meta': meta
},
'nation': {
'val':'',
'valQuote': eid_location['locationNation'],
'ref':[],
'refBlob': eid_location['refLocationNation'],
'meta': meta
},
'continent': {
'val':'',
'valQuote': eid_location['locationContinent'],
'ref':[],
'refBlob': eid_location['refLocationContinent'],
'meta': meta
}
}, #locations
## Hosts
'hosts':{
'sex': {
'val':'',
'valQuote': eid['hostSex'],
'ref':[],
'refBlob': eid['refHostSex'],
'meta': meta
},
'domesticationStatus': {
'val':'',
'valQuote': eid['domesticationStatus'],
'ref':[],
'refBlob': eid['refDomesticationStatus'],
'meta': meta
},
'age': {
'val':eid['hostAgeVal'],
'valQuote': eid['hostAge'],
'ref':[],
'refBlob': eid['refHostAge'],
'meta': meta
},
'hostUse': {
'val':'',
'valQuote': eid['hostUse'],
'ref':[],
'refBlob': eid['refHostUse'],
'meta': meta
},
'reportedName': {
'val':'',
'valQuote': eid_host['hostReportedName'],
'ref':[],
'refBlob': eid_host['refHostReportedName'],
'meta': meta
},
'class': {
'val':'',
'valQuote': eid_host['hostClass'],
'ref':[],
'refBlob': eid_host['refHostClass'],
'meta': meta
},
'family': {
'val':'',
'valQuote': eid_host['hostFamily'],
'ref':[],
'refBlob': eid_host['refHostFamily'],
'meta': meta
},
'species': {
'val':'',
'valQuote': eid_host['hostSpecies'],
'ref':[],
'refBlob': eid_host['refHostSpecies'],
'meta': meta
},
'authority': {
'val':'',
'valQuote': eid_host['hostAuthority'],
'ref':[],
'refBlob': eid_host['refHostAuthority'],
'meta': meta
},
'taxOrder': {
'val':'',
'valQuote': eid_host['hostTaxOrder'],
'ref':[],
'refBlob': eid_host['refHostTaxOrder'],
'meta': meta
},
'genus': {
'val':'',
'valQuote': eid_host['hostGenus'],
'ref':[],
'refBlob': eid_host['refHostGenus'],
'meta': meta
},
'subSpecies': {
'val':'',
'valQuote': eid_host['hostSubSpecies'],
'ref':[],
'refBlob': eid_host['refHostSubSpecies'],
'meta': meta
}
}, #hosts
## Dates
'dates':{
'startDate':{
'val':'',
'valQuote': eid['startDate'],
'valForm': eid['startDateISO'],
'ref':[],
'refBlob': eid['refStartDate'],
'meta': meta
},
'endDate':{
'val':'',
'valQuote': eid['endDate'],
'valForm': eid['endDateISO'],
'ref':[],
'refBlob': eid['refEndDate'],
'meta': meta
},
'duration': {
'val':'',
'valQuote': eid['duration'],
'ref':[],
'refBlob': eid['refDuration'],
'meta': meta
}
}, #dates
## Characteristics
'characteristics':{
'numberInfected': {
'val':eid['numberInfectedVal'],
'valQuote': eid['numberInfected'],
'ref':[],
'refBlob': eid['refNumberInfected'],
'meta': meta
},
'prevalence': {
'val':'',
'valQuote': eid['prevalence'],
'ref':[],
'refBlob': eid['refPrevalence'],
'meta': meta
},
'symptomsReported': {
'val':eid['SymptomsReportedVal'],
'valQuote': eid['symptomsReported'],
'ref':[],
'refBlob': eid['refSymptomsReported'],
'meta': meta
},
'numberOfDeaths': {
'val':eid['numberOfDeathsVal'],
'valQuote': eid['numberOfDeaths'],
'ref':[],
'refBlob': eid['refNumberofDeaths'],
'meta': meta
},
}, #characteristics
## Contacts
'contacts':{
'firstName': '',
'lastName': '',
'email': '',
'affiliation': '',
'userID': '',
'blob': {
'val': eid['contact'],
'ref':[],
'refBlob': eid['refContact']
},
'meta': meta
}, # contacts
## Economics
'economics':{
'avgAgeInfected': {
'val':'',
'valQuote': eid_economics['avgAgeOfInfected'],
'ref':[],
'refBlob': eid_economics['refAvgAgeOfInfected'],
'meta': meta},
'avgAgeDeath': {
'val':'',
'valQuote': eid_economics['avgAgeDeath'],
'ref':[],
'refBlob': eid_economics['refAvgAgeDeath'],
'meta': meta},
'tradeTravelRestrictions': {
'val':'',
'valQuote': eid_economics['tradeTravelRestrictions'],
'ref':[],
'refBlob': eid_economics['refTradeTravelRestrictions'],
'meta': meta},
'numHospitalized': {
'val':'',
'valQuote': eid_economics['numHospitalizedInEvent'],
'ref':[],
'refBlob': eid_economics['refNumHospInEvent'],
'meta': meta},
'avgCostPerTreatment': {
'val':'',
'valQuote': eid_economics['avgCosPerTreatmentInEvent'],
'ref':[],
'refBlob': eid_economics['refAvgCostTreatmentInEvent'],
'meta': meta
},
'perCapitaNatGDPEventYear': {
'val':'',
'valQuote': eid_economics['perCapitaNationalGDPInYearOfEvent'],
'ref':[],
'refBlob': eid_economics['refPerCapitaNationalGDPInYearOfEvent'],
'meta': meta
},
'avgLifeExpectEventCountryYear': {
'val':'',
'valQuote': eid_economics['avgLifeExpectancyInCountryAndYearOfEvent'],
'ref':[],
'refBlob': eid_economics['refAvgLifeExpectancyInCountryAndYearOfEvent'],
'meta': meta
}
} # economics
} # value object
} #body
#fix the OjectID format
eid_body['_id'] = str(ObjectId())
#print eid_body['refs'][0]
#print count
#['sickiID']
#['meta']['reviewer']
db.entries.insert(eid_body) | stats2sicki.py | import pymongo, datetime
from bson.objectid import ObjectId
import ast
# Duplicate copy of the migration-script setup above (legacy Python 2).
#db = pymongo.Connection('localhost', 27017)['sicki3']
# NOTE(review): pymongo.Connection is the pre-3.0 API; port 3002 is
# presumably the local Meteor mongo -- verify before running elsewhere.
db = pymongo.Connection('localhost', 3002)['meteor']
# source collections
#economics = db.economics
event = db.event
#host = db.host
#location = db.location
#'metaData': metaData = db.'metaData': metaData
#metrics = db.metrics
#pathogen = db.pathogen
# update entries from the other collections
eid_events = event.find()
# Shared review metadata: referenced (not copied) by every field built below.
meta = {
    'rank' : {
        'eha': True,
        'expert': False,
        'top': True,
        'auto': False
    },
    'votes': {
        'up': 0,
        'down': 0
    },
    'userId': 0,
    'reviewer': 'eha',
    'submitted': ''
}
print meta
# Processed-event counter (diagnostics only).
count = 0
#dates = ['startDate','startDateISO','refStartDate','endDate','endDateISO','refEndDate']
for eid in eid_events:
#print eid['eventName']
count += 1
# Load host
cursor = db.host.find ({'eventName': eid['eventName']})
for value in cursor:
eid_host = value
#print eid_host['hostTaxOrder']
# Load economics
cursor = db.economics.find ({'eventName': eid['eventName']})
for value in cursor:
eid_economics = value
#print eid_economics['avgAgeDeath']
# Load pathogens
cursor = db.pathogen.find ({'eventName': eid['eventName']})
for value in cursor:
eid_pathogen = value
#print eid_pathogen['pathogenClass']
# Load locations
cursor = db.location.find ({'eventName': eid['eventName']})
for value in cursor:
eid_location = value
#print eid_location['locationNation']
# Load SI matches
cursor = db.match_key_si.find ({'jonesEIDID': eid['eidID']})
# print type(db.match_key_si.findOne({}, {'jonesEIDID':1}))
for value in cursor:
eid_match = value
print eid_match['jonesEIDID']
id = eid.get('_id')
eid_body = {
# Meta Data
'meta': {
'rank' : {
'eha': True,
'expert': False,
'top': True,
'auto': False
},
'votes': {
'up': 0,
'down': 0
},
'userId': 0,
'reviewer': eid['reviewer'],
'submitted': ''
},
# Calculated fields
'commentsCount': 0, #Calculated field
'refsCount': 0, #Calculated field
# Relational fields
# populate refs when matched with the original jones refs
'refs': ast.literal_eval(eid_match['jonesSources']), #Calculated field
'comments' : [], #Calculated field
# Quoted value
'valQuote': {}, # original data from Jones et al SI
# Value object
'val' : {
'sickiID':{
'val':'',
'valQuote': eid['eidID'],
'ref': 'ehaID',
'meta': meta
},
'eventName':{
'val':'',
'valQuote': eid['eventName'],
'refBlob': eid['refEventName'],
'ref': [],
'meta': meta
},
'disease':{
'val': eid['diseaseVal'],
'valQuote': eid['disease'],
'icd10':eid['ICD10Val'],
'ref':[],
'refBlob': eid['refDisease'],
'meta': meta
},
'eid':{
'val':'',
'valQuote': eid['eid'],
'ref':[],
'refBlob': eid['refEID'],
'meta': meta
},
'eidCategory':{
'val':'',
'valQuote': eid['eidCategory'],
'ref':[],
'refBlob': eid['refEIDCategory'],
'meta': meta
},
'abstract': {
'val':'',
'valQuote': eid['Abstract'],
'ref':[],
'refBlob': eid['refAbstract'],
'meta': meta
},
'notes': {
'val':'',
'valQuote': eid['notes'],
'ref':[],
'refBlob': eid['refNotes'],
'meta': meta
},
'transmissionModel': {
'val':eid['transitionModelVal'],
'valQuote': eid['transitionModel'],
'ref':[],
'refBlob': eid['refTransitionModel'],
'meta': meta
},
'zoonoticType': {
'val':eid['zoonoticTypeVal'],
'valQuote': eid['zoonoticType'],
'ref':[],
'refBlob': eid['refZoonoticType'],
'meta': meta
},
'sampleType': {
'val':eid['sampleTypeVal'],
'valQuote': eid['sampleType'],
'ref':[],
'refBlob': eid['refSampleType'],
'meta': meta
},
'driver': {
'val':eid['driverVal'],
'valQuote': eid['driver'],
'ref':[],
'refBlob': eid['refDriver'],
'meta': meta
},
## Pathogens
'pathogens':{
'drugResistance': {
'val':'',
'valQuote': eid_pathogen['pathogenDrugResistance'],
'ref':[],
'refBlob': eid_pathogen['refPathogenDrugResistance'],
'meta': meta
},
'reportedName': {
'val':'',
'valQuote': eid_pathogen['pathogenReportedName'],
'ref':[],
'refBlob': eid_pathogen['refPathogenReportedName'],
'meta': meta
},
'class': {
'val':'',
'valQuote': eid_pathogen['pathogenClass'],
'ref':[],
'refBlob': eid_pathogen['refPathogenClass'],
'meta': meta
},
'family': {
'val':'',
'valQuote': eid_pathogen['pathogenFamily'],
'ref':[],
'refBlob': eid_pathogen['refPathogenFamily'],
'meta': meta
},
'species': {
'val':'',
'valQuote': eid_pathogen['pathogenSpecies'],
'ref':[],
'refBlob': eid_pathogen['refPathogenSpecies'],
'meta': meta
},
'authority': {
'val':'',
'valQuote': eid_pathogen['pathogenAuthority'],
'ref':[],
'refBlob': eid_pathogen['refPathogenAuthority'],
'meta': meta
},
'taxOrder': {
'val':'',
'valQuote': eid_pathogen['pathogenTaxOrder'],
'ref':[],
'refBlob': eid_pathogen['refPathogenTaxOrder'],
'meta': meta
},
'genus': {
'val':'',
'valQuote': eid_pathogen['pathogenGenus'],
'ref':[],
'refBlob': eid_pathogen['refPathogenGenus'],
'meta': meta
},
'subSpecies': {
'val':'',
'valQuote': eid_pathogen['pathogenSubSpecies'],
'ref':[],
'refBlob': eid_pathogen['refPathogenSubSpecies'],
'meta': meta
}
}, #pathogens
## Locations
'locations':{
'name': {
'val':'',
'valQuote': eid_location['locationLocationName'],
'ref':[],
'refBlob': eid_location['refLocationLocationName'],
'meta': meta
},
'placeName': {
'val':'',
'valQuote': eid_location['locationPlaceName'],
'ref':[],
'refBlob': eid_location['refLocationPlaceName'],
'meta': meta
},
'latitude': {
'val':'',
'valQuote': eid_location['locationLatitude'],
'ref':[],
'refBlob': eid_location['refLocationLatitude'],
'meta': meta
},
'longitude': {
'val':'',
'valQuote': eid_location['locationLongitude'],
'ref':[],
'refBlob': eid_location['refLocationLongitude'],
'meta': meta
},
'city': {
'val':'',
'valQuote': eid_location['locationCity'],
'ref':[],
'refBlob': eid_location['refLocationCity'],
'meta': meta
},
'subnationalRegion': {
'val':'',
'valQuote': eid_location['locationSubnationalRegion'],
'ref':[],
'refBlob': eid_location['refLocationSubnationalRegion'],
'meta': meta
},
'nation': {
'val':'',
'valQuote': eid_location['locationNation'],
'ref':[],
'refBlob': eid_location['refLocationNation'],
'meta': meta
},
'continent': {
'val':'',
'valQuote': eid_location['locationContinent'],
'ref':[],
'refBlob': eid_location['refLocationContinent'],
'meta': meta
}
}, #locations
## Hosts
'hosts':{
'sex': {
'val':'',
'valQuote': eid['hostSex'],
'ref':[],
'refBlob': eid['refHostSex'],
'meta': meta
},
'domesticationStatus': {
'val':'',
'valQuote': eid['domesticationStatus'],
'ref':[],
'refBlob': eid['refDomesticationStatus'],
'meta': meta
},
'age': {
'val':eid['hostAgeVal'],
'valQuote': eid['hostAge'],
'ref':[],
'refBlob': eid['refHostAge'],
'meta': meta
},
'hostUse': {
'val':'',
'valQuote': eid['hostUse'],
'ref':[],
'refBlob': eid['refHostUse'],
'meta': meta
},
'reportedName': {
'val':'',
'valQuote': eid_host['hostReportedName'],
'ref':[],
'refBlob': eid_host['refHostReportedName'],
'meta': meta
},
'class': {
'val':'',
'valQuote': eid_host['hostClass'],
'ref':[],
'refBlob': eid_host['refHostClass'],
'meta': meta
},
'family': {
'val':'',
'valQuote': eid_host['hostFamily'],
'ref':[],
'refBlob': eid_host['refHostFamily'],
'meta': meta
},
'species': {
'val':'',
'valQuote': eid_host['hostSpecies'],
'ref':[],
'refBlob': eid_host['refHostSpecies'],
'meta': meta
},
'authority': {
'val':'',
'valQuote': eid_host['hostAuthority'],
'ref':[],
'refBlob': eid_host['refHostAuthority'],
'meta': meta
},
'taxOrder': {
'val':'',
'valQuote': eid_host['hostTaxOrder'],
'ref':[],
'refBlob': eid_host['refHostTaxOrder'],
'meta': meta
},
'genus': {
'val':'',
'valQuote': eid_host['hostGenus'],
'ref':[],
'refBlob': eid_host['refHostGenus'],
'meta': meta
},
'subSpecies': {
'val':'',
'valQuote': eid_host['hostSubSpecies'],
'ref':[],
'refBlob': eid_host['refHostSubSpecies'],
'meta': meta
}
}, #hosts
## Dates
'dates':{
'startDate':{
'val':'',
'valQuote': eid['startDate'],
'valForm': eid['startDateISO'],
'ref':[],
'refBlob': eid['refStartDate'],
'meta': meta
},
'endDate':{
'val':'',
'valQuote': eid['endDate'],
'valForm': eid['endDateISO'],
'ref':[],
'refBlob': eid['refEndDate'],
'meta': meta
},
'duration': {
'val':'',
'valQuote': eid['duration'],
'ref':[],
'refBlob': eid['refDuration'],
'meta': meta
}
}, #dates
## Characteristics
'characteristics':{
'numberInfected': {
'val':eid['numberInfectedVal'],
'valQuote': eid['numberInfected'],
'ref':[],
'refBlob': eid['refNumberInfected'],
'meta': meta
},
'prevalence': {
'val':'',
'valQuote': eid['prevalence'],
'ref':[],
'refBlob': eid['refPrevalence'],
'meta': meta
},
'symptomsReported': {
'val':eid['SymptomsReportedVal'],
'valQuote': eid['symptomsReported'],
'ref':[],
'refBlob': eid['refSymptomsReported'],
'meta': meta
},
'numberOfDeaths': {
'val':eid['numberOfDeathsVal'],
'valQuote': eid['numberOfDeaths'],
'ref':[],
'refBlob': eid['refNumberofDeaths'],
'meta': meta
},
}, #characteristics
## Contacts
'contacts':{
'firstName': '',
'lastName': '',
'email': '',
'affiliation': '',
'userID': '',
'blob': {
'val': eid['contact'],
'ref':[],
'refBlob': eid['refContact']
},
'meta': meta
}, # contacts
## Economics
'economics':{
'avgAgeInfected': {
'val':'',
'valQuote': eid_economics['avgAgeOfInfected'],
'ref':[],
'refBlob': eid_economics['refAvgAgeOfInfected'],
'meta': meta},
'avgAgeDeath': {
'val':'',
'valQuote': eid_economics['avgAgeDeath'],
'ref':[],
'refBlob': eid_economics['refAvgAgeDeath'],
'meta': meta},
'tradeTravelRestrictions': {
'val':'',
'valQuote': eid_economics['tradeTravelRestrictions'],
'ref':[],
'refBlob': eid_economics['refTradeTravelRestrictions'],
'meta': meta},
'numHospitalized': {
'val':'',
'valQuote': eid_economics['numHospitalizedInEvent'],
'ref':[],
'refBlob': eid_economics['refNumHospInEvent'],
'meta': meta},
'avgCostPerTreatment': {
'val':'',
'valQuote': eid_economics['avgCosPerTreatmentInEvent'],
'ref':[],
'refBlob': eid_economics['refAvgCostTreatmentInEvent'],
'meta': meta
},
'perCapitaNatGDPEventYear': {
'val':'',
'valQuote': eid_economics['perCapitaNationalGDPInYearOfEvent'],
'ref':[],
'refBlob': eid_economics['refPerCapitaNationalGDPInYearOfEvent'],
'meta': meta
},
'avgLifeExpectEventCountryYear': {
'val':'',
'valQuote': eid_economics['avgLifeExpectancyInCountryAndYearOfEvent'],
'ref':[],
'refBlob': eid_economics['refAvgLifeExpectancyInCountryAndYearOfEvent'],
'meta': meta
}
} # economics
} # value object
} #body
#fix the ObjectId format
eid_body['_id'] = str(ObjectId())
#print eid_body['refs'][0]
#print count
#['sickiID']
#['meta']['reviewer']
db.entries.insert(eid_body) | 0.248443 | 0.127925 |
import dataclasses
import glob
import json
import logging
import os
import shutil
import site
import subprocess
import sys
from dataclasses import dataclass, field
from logging import Logger
from pathlib import Path
from typing import (
Any,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Type,
TypeVar,
Union,
)
import psutil
from .. import command_arguments, dataclasses_merge, find_directories
from ..filesystem import expand_global_root, expand_relative_path
from ..find_directories import (
BINARY_NAME,
CONFIGURATION_FILE,
get_relative_local_root,
LOCAL_CONFIGURATION_FILE,
LOG_DIRECTORY,
)
from . import (
exceptions,
ide_features as ide_features_module,
platform_aware,
python_version as python_version_module,
search_path as search_path_module,
shared_memory as shared_memory_module,
site_packages,
unwatched,
)
# Module-level logger shared by all configuration parsing/validation code below.
LOG: Logger = logging.getLogger(__name__)
# Generic type variable used by small typing helpers such as `_get_optional_value`.
T = TypeVar("T")
def _get_optional_value(source: Optional[T], default: T) -> T:
return source if source is not None else default
def _expand_and_get_existent_ignore_all_errors_path(
    ignore_all_errors: Iterable[str], project_root: str
) -> List[str]:
    """Expand each `ignore_all_errors` entry (globs and `//`-relative paths)
    against `project_root` and return only the paths that exist on disk.

    Nonexistent entries are logged: a glob-looking entry gets a "no matches"
    warning, a plain path gets a "nonexistent path" warning.
    """
    expanded_ignore_paths = []
    for path in ignore_all_errors:
        expanded = glob.glob(expand_global_root(path, global_root=project_root))
        if not expanded:
            # Keep the raw entry so the loop below can warn about it.
            expanded_ignore_paths.append(path)
        else:
            expanded_ignore_paths.extend(expanded)
    paths = []
    for path in expanded_ignore_paths:
        if os.path.exists(path):
            paths.append(path)
        elif _is_glob(path):
            # Bug fix: the original logged the "Nonexistent paths" warning
            # unconditionally here AND again in the else branch, producing a
            # duplicate (and misleading) message for glob entries. Emit
            # exactly one warning per missing entry.
            LOG.warning(
                f"Within `ignore_all_errors`, no matches found to glob pattern: `{path}`"
            )
        else:
            LOG.warning(
                f"Nonexistent paths passed in to `ignore_all_errors`: `{path}`"
            )
    return paths
def _is_glob(path: str) -> bool:
if ("*" in path) or ("?" in path) or (("[" in path) and ("]" in path)):
return True
return False
@dataclasses.dataclass
class ExtensionElement:
    """One entry of the `extensions` configuration item: a file suffix plus a
    flag controlling whether the suffix is kept in module qualifiers."""

    suffix: str
    include_suffix_in_module_qualifier: bool

    def command_line_argument(self) -> str:
        """Serialize this element into the backend's command-line syntax."""
        if not self.include_suffix_in_module_qualifier:
            return self.suffix
        return self.suffix + "$" + "include_suffix_in_module_qualifier"

    @staticmethod
    def from_json(json: Union[str, Dict[str, Union[str, bool]]]) -> "ExtensionElement":
        """Parse a bare suffix string, or a `{suffix, include_suffix_in_module_qualifier}`
        object, into an `ExtensionElement`.

        Raises `InvalidConfiguration` on any other shape.
        """
        if isinstance(json, str):
            return ExtensionElement(
                suffix=json, include_suffix_in_module_qualifier=False
            )
        if isinstance(json, dict):
            qualifier_flag = json.get("include_suffix_in_module_qualifier")
            include_suffix = qualifier_flag if isinstance(qualifier_flag, bool) else False
            suffix = json.get("suffix")
            if isinstance(suffix, str):
                return ExtensionElement(
                    suffix=suffix,
                    include_suffix_in_module_qualifier=include_suffix,
                )
        raise exceptions.InvalidConfiguration(f"Invalid extension element: {json}")
def get_default_site_roots() -> List[str]:
    """Return the user site-packages directory followed by the system ones,
    or an empty list when the `site` module cannot provide them."""
    try:
        user_site = site.getusersitepackages()
        system_sites = site.getsitepackages()
    except AttributeError:
        # There are a few Python versions that ship with a broken venv,
        # where `getsitepackages` is not available.
        LOG.warning(
            "Either `site.getusersitepackages()` or `site.getsitepackages()` "
            + "is not available in your virtualenv. This is a known virtualenv "
            + 'bug and as a workaround please explicitly specify `"site_root"` '
            + "in your Pyre configuration."
        )
        return []
    return [user_site] + system_sites
@dataclasses_merge.dataclass_merge
@dataclass(frozen=True)
class PartialConfiguration:
    """A possibly-incomplete Pyre configuration.

    Fields default to None/empty so that several partial configurations
    (command-line arguments, a local configuration file, a global one) can be
    layered via `dataclasses_merge`. The `merge_policy` metadata on sequence
    fields controls how an overriding configuration combines with its base
    (PREPEND concatenates; RAISE_WHEN_OVERWRITTEN forbids redefinition).
    """

    binary: Optional[str] = None
    buck_mode: Optional[platform_aware.PlatformAware[str]] = field(
        default=None,
        metadata={"merge_policy": platform_aware.PlatformAware.merge_optional},
    )
    do_not_ignore_errors_in: Sequence[str] = field(
        default_factory=list,
        metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
    )
    dot_pyre_directory: Optional[Path] = None
    excludes: Sequence[str] = field(
        default_factory=list,
        metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
    )
    extensions: Sequence[ExtensionElement] = field(
        default_factory=list,
        metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
    )
    ide_features: Optional[ide_features_module.IdeFeatures] = field(
        default=None,
        metadata={"merge_policy": ide_features_module.IdeFeatures.merge_optional},
    )
    ignore_all_errors: Sequence[str] = field(
        default_factory=list,
        metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
    )
    isolation_prefix: Optional[str] = None
    logger: Optional[str] = None
    number_of_workers: Optional[int] = None
    oncall: Optional[str] = None
    other_critical_files: Sequence[str] = field(
        default_factory=list,
        metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
    )
    pysa_version_hash: Optional[str] = None
    python_version: Optional[python_version_module.PythonVersion] = None
    search_path: Sequence[search_path_module.RawElement] = field(
        default_factory=list,
        metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
    )
    # NOTE(review): a single default instance is shared across all
    # PartialConfiguration objects — safe only if SharedMemory is immutable;
    # confirm it is a frozen dataclass.
    shared_memory: shared_memory_module.SharedMemory = (
        shared_memory_module.SharedMemory()
    )
    site_package_search_strategy: Optional[site_packages.SearchStrategy] = None
    site_roots: Optional[Sequence[str]] = None
    # `source_directories` and `targets` must not be silently overwritten when
    # two merged configurations both define them.
    source_directories: Optional[Sequence[search_path_module.RawElement]] = field(
        default=None,
        metadata={"merge_policy": dataclasses_merge.Policy.RAISE_WHEN_OVERWRITTEN},
    )
    strict: Optional[bool] = None
    taint_models_path: Sequence[str] = field(
        default_factory=list,
        metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
    )
    targets: Optional[Sequence[str]] = field(
        default=None,
        metadata={"merge_policy": dataclasses_merge.Policy.RAISE_WHEN_OVERWRITTEN},
    )
    typeshed: Optional[str] = None
    unwatched_dependency: Optional[unwatched.UnwatchedDependency] = None
    use_buck2: Optional[bool] = None
    version_hash: Optional[str] = None

    @staticmethod
    def _get_depreacted_map() -> Dict[str, str]:
        """Map deprecated configuration keys to their replacements."""
        # NOTE(review): method name misspells "deprecated"; it is private and
        # referenced only below, so it is kept as-is in this doc pass.
        return {"do_not_check": "ignore_all_errors"}

    @staticmethod
    def _get_extra_keys() -> Set[str]:
        """Keys that are known to appear in configuration files but are not
        parsed here; they must not trigger an 'unrecognized item' warning."""
        return {
            "create_open_source_configuration",
            "saved_state",
            "stable_client",
            "taint_models_path",
            "unstable_client",
        }

    @staticmethod
    def from_command_arguments(
        arguments: command_arguments.CommandArguments,
    ) -> "PartialConfiguration":
        """Build a partial configuration from parsed command-line arguments.

        Fields with no command-line counterpart are left at their neutral
        values (None or empty) so that file-based configurations can fill
        them in during merging.
        """
        # `strict` stays None (rather than False) when the flag is absent so
        # that a configuration file can still turn it on.
        strict: Optional[bool] = True if arguments.strict else None
        source_directories = [
            search_path_module.SimpleRawElement(element)
            for element in arguments.source_directories
        ] or None
        targets: Optional[List[str]] = (
            arguments.targets if len(arguments.targets) > 0 else None
        )
        python_version_string = arguments.python_version
        # NOTE(review): this condition does not consider
        # `arguments.enable_find_symbols`; if only that flag is set,
        # `ide_features` remains None — confirm this is intended.
        ide_features = (
            ide_features_module.IdeFeatures(
                hover_enabled=arguments.enable_hover,
                go_to_definition_enabled=arguments.enable_go_to_definition,
                find_symbols_enabled=arguments.enable_find_symbols,
            )
            if arguments.enable_hover is not None
            or arguments.enable_go_to_definition is not None
            else None
        )
        return PartialConfiguration(
            binary=arguments.binary,
            buck_mode=platform_aware.PlatformAware.from_json(
                arguments.buck_mode, "buck_mode"
            ),
            do_not_ignore_errors_in=arguments.do_not_ignore_errors_in,
            dot_pyre_directory=arguments.dot_pyre_directory,
            excludes=arguments.exclude,
            extensions=[],
            ide_features=ide_features,
            ignore_all_errors=[],
            isolation_prefix=arguments.isolation_prefix,
            logger=arguments.logger,
            number_of_workers=arguments.number_of_workers,
            oncall=None,
            other_critical_files=[],
            pysa_version_hash=None,
            python_version=(
                python_version_module.PythonVersion.from_string(python_version_string)
                if python_version_string is not None
                else None
            ),
            search_path=[
                search_path_module.SimpleRawElement(element)
                for element in arguments.search_path
            ],
            shared_memory=shared_memory_module.SharedMemory(
                heap_size=arguments.shared_memory_heap_size,
                dependency_table_power=arguments.shared_memory_dependency_table_power,
                hash_table_power=arguments.shared_memory_hash_table_power,
            ),
            site_package_search_strategy=None,
            site_roots=None,
            source_directories=source_directories,
            strict=strict,
            taint_models_path=[],
            targets=targets,
            typeshed=arguments.typeshed,
            unwatched_dependency=None,
            use_buck2=arguments.use_buck2,
            version_hash=None,
        )

    @staticmethod
    def from_string(contents: str) -> "PartialConfiguration":
        """Parse a JSON configuration string into a `PartialConfiguration`.

        Recognized keys are popped off the parsed object, so whatever remains
        at the end can be reported as deprecated or unrecognized. Raises
        `InvalidConfiguration` on malformed JSON or ill-typed fields.
        """
        def is_list_of_string(elements: object) -> bool:
            return isinstance(elements, list) and all(
                isinstance(element, str) for element in elements
            )
        # Pop `name` and require it to be of `expected_type` (or absent).
        def ensure_option_type(
            json: Dict[str, Any], name: str, expected_type: Type[T]
        ) -> Optional[T]:
            result = json.pop(name, None)
            if result is None:
                return None
            elif isinstance(result, expected_type):
                return result
            raise exceptions.InvalidConfiguration(
                f"Configuration field `{name}` is expected to have type "
                f"{expected_type} but got: `{result}`."
            )
        # Pop `name` and require it to be a string or a str->str dict.
        def ensure_optional_string_or_string_dict(
            json: Dict[str, Any], name: str
        ) -> Optional[Union[Dict[str, str], str]]:
            result = json.pop(name, None)
            if result is None:
                return None
            elif isinstance(result, str):
                return result
            elif isinstance(result, Dict):
                for value in result.values():
                    if not isinstance(value, str):
                        raise exceptions.InvalidConfiguration(
                            f"Configuration field `{name}` is expected to be a "
                            + f"dict of strings but got `{result}`."
                        )
                return result
            raise exceptions.InvalidConfiguration(
                f"Configuration field `{name}` is expected to be a string or a "
                + f"dict of strings but got `{result}`."
            )
        # Pop `name` and require a list of strings (or absent -> None).
        def ensure_optional_string_list(
            json: Dict[str, Any], name: str
        ) -> Optional[List[str]]:
            result = json.pop(name, None)
            if result is None:
                return None
            elif is_list_of_string(result):
                return result
            raise exceptions.InvalidConfiguration(
                f"Configuration field `{name}` is expected to be a list of "
                + f"strings but got `{result}`."
            )
        # Pop `name` and require a list of strings; absent -> []. When
        # `allow_single_string` is set, a bare string is wrapped in a list.
        def ensure_string_list(
            json: Dict[str, Any], name: str, allow_single_string: bool = False
        ) -> List[str]:
            result = json.pop(name, [])
            if allow_single_string and isinstance(result, str):
                result = [result]
            if is_list_of_string(result):
                return result
            raise exceptions.InvalidConfiguration(
                f"Configuration field `{name}` is expected to be a list of "
                + f"strings but got `{result}`."
            )
        def ensure_list(json: Dict[str, object], name: str) -> List[object]:
            result = json.pop(name, [])
            if isinstance(result, list):
                return result
            raise exceptions.InvalidConfiguration(
                f"Configuration field `{name}` is expected to be a list but got `{result}`."
            )
        try:
            configuration_json = json.loads(contents)
            dot_pyre_directory = ensure_option_type(
                configuration_json, "dot_pyre_directory", str
            )
            # `search_path` may be a single element or a list of elements.
            search_path_json = configuration_json.pop("search_path", [])
            if isinstance(search_path_json, list):
                search_path = [
                    search_path_module.create_raw_element(json)
                    for json in search_path_json
                ]
            else:
                search_path = [search_path_module.create_raw_element(search_path_json)]
            python_version_json = configuration_json.pop("python_version", None)
            if python_version_json is None:
                python_version = None
            elif isinstance(python_version_json, str):
                python_version = python_version_module.PythonVersion.from_string(
                    python_version_json
                )
            else:
                raise exceptions.InvalidConfiguration(
                    "Expect python version to be a string but got"
                    + f"'{python_version_json}'"
                )
            shared_memory_json = ensure_option_type(
                configuration_json, "shared_memory", dict
            )
            if shared_memory_json is None:
                shared_memory = shared_memory_module.SharedMemory()
            else:
                shared_memory = shared_memory_module.SharedMemory(
                    heap_size=ensure_option_type(shared_memory_json, "heap_size", int),
                    dependency_table_power=ensure_option_type(
                        shared_memory_json, "dependency_table_power", int
                    ),
                    hash_table_power=ensure_option_type(
                        shared_memory_json, "hash_table_power", int
                    ),
                )
                # Keys left behind after the pops above are unknown to us.
                for unrecognized_key in shared_memory_json:
                    LOG.warning(f"Unrecognized configuration item: {unrecognized_key}")
            source_directories_json = ensure_option_type(
                configuration_json, "source_directories", list
            )
            if isinstance(source_directories_json, list):
                source_directories = [
                    search_path_module.create_raw_element(json)
                    for json in source_directories_json
                ]
            else:
                source_directories = None
            site_package_search_strategy_json = ensure_option_type(
                configuration_json, "site_package_search_strategy", str
            )
            if site_package_search_strategy_json is None:
                site_package_search_strategy = None
            else:
                site_package_search_strategy = site_packages.SearchStrategy.from_string(
                    site_package_search_strategy_json
                )
                if site_package_search_strategy is None:
                    raise exceptions.InvalidConfiguration(
                        "Invalid value for `site_package_search_strategy`: "
                        f"{site_package_search_strategy_json}. Available choices: "
                        f"{[str(x) for x in site_packages.SearchStrategy]}."
                    )
            ide_features_json = ensure_option_type(
                configuration_json, "ide_features", dict
            )
            if ide_features_json is None:
                ide_features = None
            else:
                ide_features = ide_features_module.IdeFeatures.create_from_json(
                    ide_features_json
                )
            unwatched_dependency_json = ensure_option_type(
                configuration_json, "unwatched_dependency", dict
            )
            if unwatched_dependency_json is None:
                unwatched_dependency = None
            else:
                unwatched_dependency = unwatched.UnwatchedDependency.from_json(
                    unwatched_dependency_json
                )
            partial_configuration = PartialConfiguration(
                binary=ensure_option_type(configuration_json, "binary", str),
                buck_mode=platform_aware.PlatformAware.from_json(
                    ensure_optional_string_or_string_dict(
                        configuration_json, "buck_mode"
                    ),
                    "buck_mode",
                ),
                do_not_ignore_errors_in=ensure_string_list(
                    configuration_json, "do_not_ignore_errors_in"
                ),
                dot_pyre_directory=Path(dot_pyre_directory)
                if dot_pyre_directory is not None
                else None,
                excludes=ensure_string_list(
                    configuration_json, "exclude", allow_single_string=True
                ),
                extensions=[
                    # pyre-fixme[6]: we did not fully verify the type of `json`
                    ExtensionElement.from_json(json)
                    for json in ensure_list(configuration_json, "extensions")
                ],
                ide_features=ide_features,
                ignore_all_errors=ensure_string_list(
                    configuration_json, "ignore_all_errors"
                ),
                isolation_prefix=ensure_option_type(
                    configuration_json, "isolation_prefix", str
                ),
                logger=ensure_option_type(configuration_json, "logger", str),
                number_of_workers=ensure_option_type(
                    configuration_json, "workers", int
                ),
                oncall=ensure_option_type(configuration_json, "oncall", str),
                other_critical_files=ensure_string_list(
                    configuration_json, "critical_files"
                ),
                pysa_version_hash=ensure_option_type(
                    configuration_json, "pysa_version", str
                ),
                python_version=python_version,
                search_path=search_path,
                shared_memory=shared_memory,
                site_package_search_strategy=site_package_search_strategy,
                site_roots=ensure_optional_string_list(
                    configuration_json, "site_roots"
                ),
                source_directories=source_directories,
                strict=ensure_option_type(configuration_json, "strict", bool),
                taint_models_path=ensure_string_list(
                    configuration_json, "taint_models_path", allow_single_string=True
                ),
                targets=ensure_optional_string_list(configuration_json, "targets"),
                typeshed=ensure_option_type(configuration_json, "typeshed", str),
                unwatched_dependency=unwatched_dependency,
                use_buck2=ensure_option_type(configuration_json, "use_buck2", bool),
                version_hash=ensure_option_type(configuration_json, "version", str),
            )
            # Check for deprecated and unused keys
            for (
                deprecated_key,
                replacement_key,
            ) in PartialConfiguration._get_depreacted_map().items():
                if deprecated_key in configuration_json:
                    configuration_json.pop(deprecated_key)
                    LOG.warning(
                        f"Configuration file uses deprecated item `{deprecated_key}`. "
                        f"Please migrate to its replacement `{replacement_key}`"
                    )
            extra_keys = PartialConfiguration._get_extra_keys()
            for unrecognized_key in configuration_json:
                if unrecognized_key not in extra_keys:
                    LOG.warning(f"Unrecognized configuration item: {unrecognized_key}")
            return partial_configuration
        except json.JSONDecodeError as error:
            raise exceptions.InvalidConfiguration(f"Invalid JSON file: {error}")

    @staticmethod
    def from_file(path: Path) -> "PartialConfiguration":
        """Read a configuration file and parse it with `from_string`.

        OS-level read errors are reported as `InvalidConfiguration`.
        """
        try:
            contents = path.read_text(encoding="utf-8")
            return PartialConfiguration.from_string(contents)
        except OSError as error:
            raise exceptions.InvalidConfiguration(f"Error when reading {path}: {error}")

    def expand_relative_paths(self, root: str) -> "PartialConfiguration":
        """Return a copy with all path-valued fields expanded relative to `root`.

        Fields that do not hold filesystem paths are carried over unchanged.
        """
        binary = self.binary
        if binary is not None:
            binary = expand_relative_path(root, binary)
        logger = self.logger
        if logger is not None:
            logger = expand_relative_path(root, logger)
        source_directories = self.source_directories
        if source_directories is not None:
            source_directories = [
                path.expand_relative_root(root) for path in source_directories
            ]
        typeshed = self.typeshed
        if typeshed is not None:
            typeshed = expand_relative_path(root, typeshed)
        unwatched_dependency = self.unwatched_dependency
        if unwatched_dependency is not None:
            files = unwatched_dependency.files
            # Only the files' root is expanded; the checksum path stays
            # relative to that root.
            unwatched_dependency = unwatched.UnwatchedDependency(
                change_indicator=unwatched_dependency.change_indicator,
                files=unwatched.UnwatchedFiles(
                    root=expand_relative_path(root, files.root),
                    checksum_path=files.checksum_path,
                ),
            )
        return PartialConfiguration(
            binary=binary,
            buck_mode=self.buck_mode,
            do_not_ignore_errors_in=[
                expand_relative_path(root, path)
                for path in self.do_not_ignore_errors_in
            ],
            dot_pyre_directory=self.dot_pyre_directory,
            excludes=self.excludes,
            extensions=self.extensions,
            ide_features=self.ide_features,
            ignore_all_errors=[
                expand_relative_path(root, path) for path in self.ignore_all_errors
            ],
            isolation_prefix=self.isolation_prefix,
            logger=logger,
            number_of_workers=self.number_of_workers,
            oncall=self.oncall,
            other_critical_files=[
                expand_relative_path(root, path) for path in self.other_critical_files
            ],
            pysa_version_hash=self.pysa_version_hash,
            python_version=self.python_version,
            search_path=[path.expand_relative_root(root) for path in self.search_path],
            shared_memory=self.shared_memory,
            site_package_search_strategy=self.site_package_search_strategy,
            site_roots=self.site_roots,
            source_directories=source_directories,
            strict=self.strict,
            taint_models_path=[
                expand_relative_path(root, path) for path in self.taint_models_path
            ],
            targets=self.targets,
            typeshed=typeshed,
            unwatched_dependency=unwatched_dependency,
            use_buck2=self.use_buck2,
            version_hash=self.version_hash,
        )
def merge_partial_configurations(
    base: PartialConfiguration, override: PartialConfiguration
) -> PartialConfiguration:
    """Merge `override` on top of `base`, translating merge failures into
    `InvalidConfiguration` errors."""
    try:
        # pyre-ignore[16]: Pyre does not understand `dataclass_merge`
        merged = PartialConfiguration.merge(base, override)
    except dataclasses_merge.DataclassMergeError as error:
        raise exceptions.InvalidConfiguration(str(error))
    return merged
@dataclass(frozen=True)
class Configuration:
project_root: str
dot_pyre_directory: Path
binary: Optional[str] = None
buck_mode: Optional[platform_aware.PlatformAware[str]] = None
do_not_ignore_errors_in: Sequence[str] = field(default_factory=list)
excludes: Sequence[str] = field(default_factory=list)
extensions: Sequence[ExtensionElement] = field(default_factory=list)
ide_features: Optional[ide_features_module.IdeFeatures] = None
ignore_all_errors: Sequence[str] = field(default_factory=list)
isolation_prefix: Optional[str] = None
logger: Optional[str] = None
number_of_workers: Optional[int] = None
oncall: Optional[str] = None
other_critical_files: Sequence[str] = field(default_factory=list)
pysa_version_hash: Optional[str] = None
python_version: Optional[python_version_module.PythonVersion] = None
relative_local_root: Optional[str] = None
search_path: Sequence[search_path_module.RawElement] = field(default_factory=list)
shared_memory: shared_memory_module.SharedMemory = (
shared_memory_module.SharedMemory()
)
site_package_search_strategy: site_packages.SearchStrategy = (
site_packages.SearchStrategy.NONE
)
site_roots: Optional[Sequence[str]] = None
source_directories: Optional[Sequence[search_path_module.RawElement]] = None
strict: bool = False
taint_models_path: Sequence[str] = field(default_factory=list)
targets: Optional[Sequence[str]] = None
typeshed: Optional[str] = None
unwatched_dependency: Optional[unwatched.UnwatchedDependency] = None
use_buck2: bool = False
version_hash: Optional[str] = None
    @staticmethod
    def from_partial_configuration(
        project_root: Path,
        relative_local_root: Optional[str],
        partial_configuration: PartialConfiguration,
    ) -> "Configuration":
        """Freeze a merged `PartialConfiguration` into a full `Configuration`,
        filling in defaults for the fields that remained unset.
        """
        search_path = partial_configuration.search_path
        return Configuration(
            project_root=str(project_root),
            # Default log location: <project_root>/<LOG_DIRECTORY>.
            dot_pyre_directory=_get_optional_value(
                partial_configuration.dot_pyre_directory, project_root / LOG_DIRECTORY
            ),
            binary=partial_configuration.binary,
            buck_mode=partial_configuration.buck_mode,
            do_not_ignore_errors_in=partial_configuration.do_not_ignore_errors_in,
            excludes=partial_configuration.excludes,
            extensions=partial_configuration.extensions,
            ide_features=partial_configuration.ide_features,
            ignore_all_errors=partial_configuration.ignore_all_errors,
            isolation_prefix=partial_configuration.isolation_prefix,
            logger=partial_configuration.logger,
            number_of_workers=partial_configuration.number_of_workers,
            oncall=partial_configuration.oncall,
            other_critical_files=partial_configuration.other_critical_files,
            pysa_version_hash=partial_configuration.pysa_version_hash,
            python_version=partial_configuration.python_version,
            relative_local_root=relative_local_root,
            # `//`-prefixed search-path entries are resolved against the
            # project root here, once and for all.
            search_path=[
                path.expand_global_root(str(project_root)) for path in search_path
            ],
            shared_memory=partial_configuration.shared_memory,
            site_package_search_strategy=partial_configuration.site_package_search_strategy
            or site_packages.SearchStrategy.NONE,
            site_roots=partial_configuration.site_roots,
            source_directories=partial_configuration.source_directories,
            strict=_get_optional_value(partial_configuration.strict, default=False),
            taint_models_path=partial_configuration.taint_models_path,
            targets=partial_configuration.targets,
            typeshed=partial_configuration.typeshed,
            unwatched_dependency=partial_configuration.unwatched_dependency,
            use_buck2=_get_optional_value(
                partial_configuration.use_buck2, default=False
            ),
            version_hash=partial_configuration.version_hash,
        )
@property
def log_directory(self) -> str:
if self.relative_local_root is None:
return str(self.dot_pyre_directory)
return str(self.dot_pyre_directory / self.relative_local_root)
@property
def local_root(self) -> Optional[str]:
if self.relative_local_root is None:
return None
return os.path.join(self.project_root, self.relative_local_root)
    def to_json(self) -> Dict[str, object]:
        """
        This method is for display purpose only. Do *NOT* expect this method
        to produce JSONs that can be de-serialized back into configurations.
        """
        # Bind optionals to locals so the `**({...} if x is not None else {})`
        # expressions below narrow on a stable value. Optional fields are
        # omitted from the output entirely rather than serialized as null.
        binary = self.binary
        buck_mode = self.buck_mode
        isolation_prefix = self.isolation_prefix
        logger = self.logger
        number_of_workers = self.number_of_workers
        oncall = self.oncall
        pysa_version_hash = self.pysa_version_hash
        python_version = self.python_version
        relative_local_root = self.relative_local_root
        source_directories = self.source_directories
        site_package_search_strategy = self.site_package_search_strategy
        site_roots = self.site_roots
        targets = self.targets
        typeshed = self.typeshed
        unwatched_dependency = self.unwatched_dependency
        version_hash = self.version_hash
        return {
            "global_root": self.project_root,
            "dot_pyre_directory": str(self.dot_pyre_directory),
            **({"binary": binary} if binary is not None else {}),
            **({"buck_mode": buck_mode.to_json()} if buck_mode is not None else {}),
            "do_not_ignore_errors_in": list(self.do_not_ignore_errors_in),
            "excludes": list(self.excludes),
            "extensions": list(self.extensions),
            "ignore_all_errors": list(self.ignore_all_errors),
            **(
                {"isolation_prefix": isolation_prefix}
                if isolation_prefix is not None
                else {}
            ),
            **({"logger": logger} if logger is not None else {}),
            **({"oncall": oncall} if oncall is not None else {}),
            **({"workers": number_of_workers} if number_of_workers is not None else {}),
            "other_critical_files": list(self.other_critical_files),
            **(
                {"pysa_version_hash": pysa_version_hash}
                if pysa_version_hash is not None
                else {}
            ),
            **(
                {"python_version": python_version.to_string()}
                if python_version is not None
                else {}
            ),
            **(
                {"relative_local_root": relative_local_root}
                if relative_local_root is not None
                else {}
            ),
            "search_path": [str(path) for path in self.search_path],
            # Only show shared-memory settings that differ from the defaults.
            **(
                {"shared_memory": self.shared_memory.to_json()}
                if self.shared_memory != shared_memory_module.SharedMemory()
                else {}
            ),
            **(
                {"site_package_search_strategy": site_package_search_strategy}
                if site_package_search_strategy is not None
                else {}
            ),
            "site_roots": site_roots if site_roots is not None else [],
            **(
                {"source_directories": [str(path) for path in source_directories]}
                if source_directories is not None
                else {}
            ),
            "strict": self.strict,
            "taint_models_path": list(self.taint_models_path),
            **({"targets": list(targets)} if targets is not None else {}),
            **({"typeshed": typeshed} if typeshed is not None else {}),
            **(
                {"unwatched_dependency": unwatched_dependency.to_json()}
                if unwatched_dependency is not None
                else {}
            ),
            "use_buck2": self.use_buck2,
            **({"version_hash": version_hash} if version_hash is not None else {}),
        }
def get_existent_unwatched_dependency(
self,
) -> Optional[unwatched.UnwatchedDependency]:
unwatched_dependency = self.unwatched_dependency
if unwatched_dependency is None:
return None
unwatched_root = Path(unwatched_dependency.files.root)
try:
if not unwatched_root.is_dir():
LOG.warning(
"Nonexistent directory passed in to `unwatched_dependency`: "
f"`{unwatched_root}`"
)
return None
checksum_path = unwatched_root / unwatched_dependency.files.checksum_path
if not checksum_path.is_file():
LOG.warning(
"Nonexistent file passed in to `unwatched_dependency`: "
f"`{checksum_path}`"
)
return None
return self.unwatched_dependency
except PermissionError as error:
LOG.warning(str(error))
return None
def get_site_roots(self) -> Sequence[str]:
site_roots = self.site_roots
if site_roots is not None:
return site_roots
return get_default_site_roots()
def expand_and_get_existent_search_paths(
self,
) -> List[search_path_module.Element]:
site_roots = self.get_site_roots()
existent_paths = search_path_module.process_raw_elements(
self.search_path, site_roots
)
site_packages_paths = site_packages.search_for_paths(
self.site_package_search_strategy, site_roots
)
typeshed_root = self.get_typeshed_respecting_override()
if typeshed_root is None:
return existent_paths + site_packages_paths
typeshed_paths: List[search_path_module.Element] = [
search_path_module.SimpleElement(str(element))
for element in find_directories.find_typeshed_search_paths(
Path(typeshed_root)
)
]
return existent_paths + site_packages_paths + typeshed_paths
def expand_and_get_existent_source_directories(
self,
) -> List[search_path_module.Element]:
source_directories = self.source_directories
if source_directories is not None:
return search_path_module.process_raw_elements(
source_directories, self.get_site_roots()
)
else:
return []
def get_existent_do_not_ignore_errors_in_paths(self) -> List[str]:
"""
This is a separate method because we want to check for existing files
at the time this is called, not when the configuration is
constructed.
"""
ignore_paths = [
expand_global_root(path, global_root=self.project_root)
for path in self.do_not_ignore_errors_in
]
paths = []
for path in ignore_paths:
if os.path.exists(path):
paths.append(path)
else:
LOG.debug(
"Filtering out nonexistent paths in `do_not_ignore_errors_in`: "
f"{path}"
)
return paths
def get_existent_ignore_all_errors_paths(self) -> List[str]:
"""
This is a separate method because we want to check for existing files
at the time this is called, not when the configuration is
constructed.
"""
return _expand_and_get_existent_ignore_all_errors_path(
self.ignore_all_errors, self.project_root
)
def get_binary_respecting_override(self) -> Optional[str]:
binary = self.binary
if binary is not None:
return binary
LOG.info(f"No binary specified, looking for `{BINARY_NAME}` in PATH")
binary_candidate = shutil.which(BINARY_NAME)
if binary_candidate is None:
binary_candidate_name = os.path.join(
os.path.dirname(sys.argv[0]), BINARY_NAME
)
binary_candidate = shutil.which(binary_candidate_name)
if binary_candidate is not None:
return binary_candidate
return None
def get_typeshed_respecting_override(self) -> Optional[str]:
typeshed = self.typeshed
if typeshed is not None:
return typeshed
LOG.info("No typeshed specified, looking for it...")
auto_determined_typeshed = find_directories.find_typeshed()
if auto_determined_typeshed is None:
LOG.warning(
"Could not find a suitable typeshed. Types for Python builtins "
"and standard libraries may be missing!"
)
return None
else:
LOG.info(f"Found: `{auto_determined_typeshed}`")
return str(auto_determined_typeshed)
def get_version_hash_respecting_override(self) -> Optional[str]:
overriding_version_hash = os.getenv("PYRE_VERSION_HASH")
if overriding_version_hash:
LOG.warning(f"Version hash overridden with `{overriding_version_hash}`")
return overriding_version_hash
return self.version_hash
def get_binary_version(self) -> Optional[str]:
binary = self.get_binary_respecting_override()
if binary is None:
return None
status = subprocess.run(
[binary, "-version"], stdout=subprocess.PIPE, universal_newlines=True
)
return status.stdout.strip() if status.returncode == 0 else None
def get_number_of_workers(self) -> int:
number_of_workers = self.number_of_workers
if number_of_workers is not None and number_of_workers > 0:
return number_of_workers
# pyre-fixme[28]: Unexpected keyword argument `logical`.
number_of_physical_cores = psutil.cpu_count(logical=False)
if number_of_physical_cores is None:
default_number_of_workers = 1
else:
default_number_of_workers = max(1, number_of_physical_cores - 1)
LOG.info(
"Could not determine the number of Pyre workers from configuration. "
f"Auto-set the value to {default_number_of_workers}."
)
if default_number_of_workers <= 1:
LOG.info(
"Consider setting the `--sequential` flag instead when the number "
"of parallel workers is not greater than 1."
)
return default_number_of_workers
def is_hover_enabled(self) -> bool:
if self.ide_features is None:
return ide_features_module.IdeFeatures.DEFAULT_HOVER_ENABLED
return self.ide_features.is_hover_enabled()
def is_go_to_definition_enabled(self) -> bool:
if self.ide_features is None:
return ide_features_module.IdeFeatures.DEFAULT_GO_TO_DEFINITION_ENABLED
return self.ide_features.is_go_to_definition_enabled()
def is_find_symbols_enabled(self) -> bool:
if self.ide_features is None:
return ide_features_module.IdeFeatures.DEFAULT_FIND_SYMBOLS_ENABLED
return self.ide_features.is_find_symbols_enabled()
def get_valid_extension_suffixes(self) -> List[str]:
vaild_extensions = []
for extension in self.extensions:
if not extension.suffix.startswith("."):
LOG.warning(
"Filtering out extension which does not start with `.`: "
f"`{extension.suffix}`"
)
else:
vaild_extensions.append(extension.command_line_argument())
return vaild_extensions
def get_isolation_prefix_respecting_override(self) -> Optional[str]:
"""We need this to disable an isolation prefix set in a configuration.
Merely omitting the CLI flag would not disable the isolation prefix
because we would just fall back to the configuration value.
With this, we can pass `--isolation-prefix ''` as a CLI argument or
override `isolation_prefix` as `""` in a local configuration."""
return None if self.isolation_prefix == "" else self.isolation_prefix
def get_python_version(self) -> python_version_module.PythonVersion:
python_version = self.python_version
if python_version is not None:
return python_version
else:
version_info = sys.version_info
return python_version_module.PythonVersion(
major=version_info.major,
minor=version_info.minor,
micro=version_info.micro,
)
def create_configuration(
    arguments: command_arguments.CommandArguments, base_directory: Path
) -> Configuration:
    """Build the effective configuration for a run.

    Looks for global/local configuration files starting at `base_directory`
    (or at the explicitly requested local configuration underneath it),
    then merges, in increasing precedence: global file < local file <
    command-line arguments.

    Raises `InvalidConfiguration` when an explicitly specified local
    configuration cannot be found.
    """
    local_root_argument = arguments.local_configuration
    search_base = (
        base_directory
        if local_root_argument is None
        else base_directory / local_root_argument
    )
    found_root = find_directories.find_global_and_local_root(search_base)
    # If the local root was explicitly specified but does not exist, return an
    # error instead of falling back to current directory.
    if local_root_argument is not None:
        if found_root is None:
            raise exceptions.InvalidConfiguration(
                "A local configuration path was explicitly specified, but no"
                + f" {CONFIGURATION_FILE} file was found in {search_base}"
                + " or its parents."
            )
        elif found_root.local_root is None:
            raise exceptions.InvalidConfiguration(
                "A local configuration path was explicitly specified, but no"
                + f" {LOCAL_CONFIGURATION_FILE} file was found in {search_base}"
                + " or its parents."
            )
    # CLI-provided relative paths are resolved against the invocation cwd.
    command_argument_configuration = PartialConfiguration.from_command_arguments(
        arguments
    ).expand_relative_paths(str(Path.cwd()))
    if found_root is None:
        # No configuration file anywhere above us: fall back to cwd.
        project_root = Path.cwd()
        relative_local_root = None
        partial_configuration = command_argument_configuration
    else:
        project_root = found_root.global_root
        relative_local_root = None
        partial_configuration = PartialConfiguration.from_file(
            project_root / CONFIGURATION_FILE
        ).expand_relative_paths(str(project_root))
        local_root = found_root.local_root
        if local_root is not None:
            relative_local_root = get_relative_local_root(project_root, local_root)
            # Local configuration overrides the global one.
            partial_configuration = merge_partial_configurations(
                base=partial_configuration,
                override=PartialConfiguration.from_file(
                    local_root / LOCAL_CONFIGURATION_FILE
                ).expand_relative_paths(str(local_root)),
            )
    # Command-line arguments take precedence over any file-based setting.
    partial_configuration = merge_partial_configurations(
        base=partial_configuration,
        override=command_argument_configuration,
    )
    return Configuration.from_partial_configuration(
        project_root, relative_local_root, partial_configuration
    )
def check_nested_local_configuration(configuration: Configuration) -> None:
    """
    Raises `InvalidConfiguration` if the check fails.

    A local configuration must not be nested under another local
    configuration unless every enclosing local configuration explicitly
    ignores the nested one via `ignore_all_errors`.

    Note: the last line of this function previously had stray text fused
    onto it (a dataset-join artifact) which made it a syntax error; it has
    been restored to the plain assignment.
    """
    local_root = configuration.local_root
    if local_root is None:
        return

    def is_subdirectory(child: Path, parent: Path) -> bool:
        # True when `child` equals `parent` or lives underneath it.
        return parent == child or parent in child.parents

    # We search from the parent of the local root, looking for another local
    # configuration file that lives above the current one
    local_root_path = Path(local_root).resolve()
    current_directory = local_root_path.parent
    while True:
        found_root = find_directories.find_global_and_local_root(current_directory)
        if found_root is None:
            break
        nesting_local_root = found_root.local_root
        if nesting_local_root is None:
            break
        nesting_configuration = PartialConfiguration.from_file(
            nesting_local_root / LOCAL_CONFIGURATION_FILE
        ).expand_relative_paths(str(nesting_local_root))
        nesting_ignored_all_errors_path = (
            _expand_and_get_existent_ignore_all_errors_path(
                nesting_configuration.ignore_all_errors, str(found_root.global_root)
            )
        )
        # The enclosing configuration is only acceptable when it ignores
        # everything under our local root.
        if not any(
            is_subdirectory(child=local_root_path, parent=Path(path))
            for path in nesting_ignored_all_errors_path
        ):
            error_message = (
                "Local configuration is nested under another local configuration at "
                f"`{nesting_local_root}`.\nPlease add `{local_root_path}` to the "
                "`ignore_all_errors` field of the parent, or combine the sources "
                "into a single configuration, or split the parent configuration to "
                "avoid inconsistent errors."
            )
            raise exceptions.InvalidConfiguration(error_message)
        # Keep climbing: there may be yet another configuration above.
        current_directory = nesting_local_root.parent
import dataclasses
import glob
import json
import logging
import os
import shutil
import site
import subprocess
import sys
from dataclasses import dataclass, field
from logging import Logger
from pathlib import Path
from typing import (
Any,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Type,
TypeVar,
Union,
)
import psutil
from .. import command_arguments, dataclasses_merge, find_directories
from ..filesystem import expand_global_root, expand_relative_path
from ..find_directories import (
BINARY_NAME,
CONFIGURATION_FILE,
get_relative_local_root,
LOCAL_CONFIGURATION_FILE,
LOG_DIRECTORY,
)
from . import (
exceptions,
ide_features as ide_features_module,
platform_aware,
python_version as python_version_module,
search_path as search_path_module,
shared_memory as shared_memory_module,
site_packages,
unwatched,
)
# Module-level logger for configuration parsing and lookup diagnostics.
LOG: Logger = logging.getLogger(__name__)
# Generic type variable used by the option helpers below.
T = TypeVar("T")
def _get_optional_value(source: Optional[T], default: T) -> T:
    """Return `source` unless it is None, in which case return `default`."""
    if source is None:
        return default
    return source
def _expand_and_get_existent_ignore_all_errors_path(
ignore_all_errors: Iterable[str], project_root: str
) -> List[str]:
expanded_ignore_paths = []
for path in ignore_all_errors:
expanded = glob.glob(expand_global_root(path, global_root=project_root))
if not expanded:
expanded_ignore_paths.append(path)
else:
expanded_ignore_paths.extend(expanded)
paths = []
for path in expanded_ignore_paths:
if os.path.exists(path):
paths.append(path)
else:
LOG.warning(f"Nonexistent paths passed in to `ignore_all_errors`: `{path}`")
if _is_glob(path):
LOG.warning(
f"Within `ignore_all_errors`, no matches found to glob pattern: `{path}`"
)
else:
LOG.warning(
f"Nonexistent paths passed in to `ignore_all_errors`: `{path}`"
)
return paths
def _is_glob(path: str) -> bool:
if ("*" in path) or ("?" in path) or (("[" in path) and ("]" in path)):
return True
return False
@dataclasses.dataclass
class ExtensionElement:
    """A file-extension entry from the `extensions` configuration field."""

    suffix: str
    include_suffix_in_module_qualifier: bool

    def command_line_argument(self) -> str:
        """Serialize this element into the backend's CLI argument format."""
        if self.include_suffix_in_module_qualifier:
            return self.suffix + "$" + "include_suffix_in_module_qualifier"
        return self.suffix

    @staticmethod
    def from_json(json: Union[str, Dict[str, Union[str, bool]]]) -> "ExtensionElement":
        """Parse either a bare suffix string or a
        `{"suffix": ..., "include_suffix_in_module_qualifier": ...}` object.

        Raises `InvalidConfiguration` on anything else.
        """
        if isinstance(json, str):
            return ExtensionElement(
                suffix=json, include_suffix_in_module_qualifier=False
            )
        if isinstance(json, dict):
            qualifier_value = json.get("include_suffix_in_module_qualifier")
            qualifier = qualifier_value if isinstance(qualifier_value, bool) else False
            suffix_value = json.get("suffix")
            if isinstance(suffix_value, str):
                return ExtensionElement(
                    suffix=suffix_value,
                    include_suffix_in_module_qualifier=qualifier,
                )
        raise exceptions.InvalidConfiguration(f"Invalid extension element: {json}")
def get_default_site_roots() -> List[str]:
try:
return [site.getusersitepackages()] + site.getsitepackages()
except AttributeError:
# There are a few Python versions that ship with a broken venv,
# where `getsitepackages` is not available.
LOG.warning(
"Either `site.getusersitepackages()` or `site.getsitepackages()` "
+ "is not available in your virtualenv. This is a known virtualenv "
+ 'bug and as a workaround please explicitly specify `"site_root"` '
+ "in your Pyre configuration."
)
return []
@dataclasses_merge.dataclass_merge
@dataclass(frozen=True)
class PartialConfiguration:
    """One layer of configuration (CLI arguments, global file, or local file).

    Fields left at their defaults count as "unset" and can be filled in by
    other layers via `merge_partial_configurations`; per-field merge behavior
    is driven by the `merge_policy` metadata consumed by `dataclass_merge`.
    """

    binary: Optional[str] = None
    buck_mode: Optional[platform_aware.PlatformAware[str]] = field(
        default=None,
        metadata={"merge_policy": platform_aware.PlatformAware.merge_optional},
    )
    do_not_ignore_errors_in: Sequence[str] = field(
        default_factory=list,
        metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
    )
    dot_pyre_directory: Optional[Path] = None
    excludes: Sequence[str] = field(
        default_factory=list,
        metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
    )
    extensions: Sequence[ExtensionElement] = field(
        default_factory=list,
        metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
    )
    ide_features: Optional[ide_features_module.IdeFeatures] = field(
        default=None,
        metadata={"merge_policy": ide_features_module.IdeFeatures.merge_optional},
    )
    ignore_all_errors: Sequence[str] = field(
        default_factory=list,
        metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
    )
    isolation_prefix: Optional[str] = None
    logger: Optional[str] = None
    number_of_workers: Optional[int] = None
    oncall: Optional[str] = None
    other_critical_files: Sequence[str] = field(
        default_factory=list,
        metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
    )
    pysa_version_hash: Optional[str] = None
    python_version: Optional[python_version_module.PythonVersion] = None
    search_path: Sequence[search_path_module.RawElement] = field(
        default_factory=list,
        metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
    )
    # NOTE(review): a single SharedMemory instance is shared as the class-level
    # default across all PartialConfigurations — fine only if SharedMemory is
    # immutable; confirm.
    shared_memory: shared_memory_module.SharedMemory = (
        shared_memory_module.SharedMemory()
    )
    site_package_search_strategy: Optional[site_packages.SearchStrategy] = None
    site_roots: Optional[Sequence[str]] = None
    source_directories: Optional[Sequence[search_path_module.RawElement]] = field(
        default=None,
        metadata={"merge_policy": dataclasses_merge.Policy.RAISE_WHEN_OVERWRITTEN},
    )
    strict: Optional[bool] = None
    taint_models_path: Sequence[str] = field(
        default_factory=list,
        metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
    )
    targets: Optional[Sequence[str]] = field(
        default=None,
        metadata={"merge_policy": dataclasses_merge.Policy.RAISE_WHEN_OVERWRITTEN},
    )
    typeshed: Optional[str] = None
    unwatched_dependency: Optional[unwatched.UnwatchedDependency] = None
    use_buck2: Optional[bool] = None
    version_hash: Optional[str] = None

    @staticmethod
    def _get_depreacted_map() -> Dict[str, str]:
        """Map deprecated configuration keys to their replacements.

        NOTE(review): the method name misspells "deprecated"; kept as-is to
        match its call sites in this class.
        """
        return {"do_not_check": "ignore_all_errors"}

    @staticmethod
    def _get_extra_keys() -> Set[str]:
        """Keys that are allowed in a configuration file but intentionally
        not warned about as unrecognized."""
        return {
            "create_open_source_configuration",
            "saved_state",
            "stable_client",
            "taint_models_path",
            "unstable_client",
        }

    @staticmethod
    def from_command_arguments(
        arguments: command_arguments.CommandArguments,
    ) -> "PartialConfiguration":
        """Build the configuration layer derived from CLI arguments."""
        strict: Optional[bool] = True if arguments.strict else None
        source_directories = [
            search_path_module.SimpleRawElement(element)
            for element in arguments.source_directories
        ] or None
        targets: Optional[List[str]] = (
            arguments.targets if len(arguments.targets) > 0 else None
        )
        python_version_string = arguments.python_version
        # NOTE(review): `enable_find_symbols` alone does not trigger creation
        # of an IdeFeatures object here (only hover/go-to-definition are
        # checked in the condition) — looks unintentional; confirm.
        ide_features = (
            ide_features_module.IdeFeatures(
                hover_enabled=arguments.enable_hover,
                go_to_definition_enabled=arguments.enable_go_to_definition,
                find_symbols_enabled=arguments.enable_find_symbols,
            )
            if arguments.enable_hover is not None
            or arguments.enable_go_to_definition is not None
            else None
        )
        return PartialConfiguration(
            binary=arguments.binary,
            buck_mode=platform_aware.PlatformAware.from_json(
                arguments.buck_mode, "buck_mode"
            ),
            do_not_ignore_errors_in=arguments.do_not_ignore_errors_in,
            dot_pyre_directory=arguments.dot_pyre_directory,
            excludes=arguments.exclude,
            extensions=[],
            ide_features=ide_features,
            ignore_all_errors=[],
            isolation_prefix=arguments.isolation_prefix,
            logger=arguments.logger,
            number_of_workers=arguments.number_of_workers,
            oncall=None,
            other_critical_files=[],
            pysa_version_hash=None,
            python_version=(
                python_version_module.PythonVersion.from_string(python_version_string)
                if python_version_string is not None
                else None
            ),
            search_path=[
                search_path_module.SimpleRawElement(element)
                for element in arguments.search_path
            ],
            shared_memory=shared_memory_module.SharedMemory(
                heap_size=arguments.shared_memory_heap_size,
                dependency_table_power=arguments.shared_memory_dependency_table_power,
                hash_table_power=arguments.shared_memory_hash_table_power,
            ),
            site_package_search_strategy=None,
            site_roots=None,
            source_directories=source_directories,
            strict=strict,
            taint_models_path=[],
            targets=targets,
            typeshed=arguments.typeshed,
            unwatched_dependency=None,
            use_buck2=arguments.use_buck2,
            version_hash=None,
        )

    @staticmethod
    def from_string(contents: str) -> "PartialConfiguration":
        """Parse the JSON text of a configuration file into a layer.

        Recognized keys are popped from the parsed JSON as they are consumed;
        whatever remains is reported as deprecated or unrecognized. Raises
        `InvalidConfiguration` on malformed JSON or mistyped fields.
        """
        def is_list_of_string(elements: object) -> bool:
            # True only for a list whose members are all strings.
            return isinstance(elements, list) and all(
                isinstance(element, str) for element in elements
            )

        def ensure_option_type(
            json: Dict[str, Any], name: str, expected_type: Type[T]
        ) -> Optional[T]:
            """Pop an optional field, requiring `expected_type` when present."""
            result = json.pop(name, None)
            if result is None:
                return None
            elif isinstance(result, expected_type):
                return result
            raise exceptions.InvalidConfiguration(
                f"Configuration field `{name}` is expected to have type "
                f"{expected_type} but got: `{result}`."
            )

        def ensure_optional_string_or_string_dict(
            json: Dict[str, Any], name: str
        ) -> Optional[Union[Dict[str, str], str]]:
            """Pop an optional field that must be a string or str->str dict."""
            result = json.pop(name, None)
            if result is None:
                return None
            elif isinstance(result, str):
                return result
            elif isinstance(result, Dict):
                for value in result.values():
                    if not isinstance(value, str):
                        raise exceptions.InvalidConfiguration(
                            f"Configuration field `{name}` is expected to be a "
                            + f"dict of strings but got `{result}`."
                        )
                return result
            raise exceptions.InvalidConfiguration(
                f"Configuration field `{name}` is expected to be a string or a "
                + f"dict of strings but got `{result}`."
            )

        def ensure_optional_string_list(
            json: Dict[str, Any], name: str
        ) -> Optional[List[str]]:
            """Pop an optional field that must be a list of strings."""
            result = json.pop(name, None)
            if result is None:
                return None
            elif is_list_of_string(result):
                return result
            raise exceptions.InvalidConfiguration(
                f"Configuration field `{name}` is expected to be a list of "
                + f"strings but got `{result}`."
            )

        def ensure_string_list(
            json: Dict[str, Any], name: str, allow_single_string: bool = False
        ) -> List[str]:
            """Pop a string-list field (default []), optionally promoting a
            bare string to a one-element list."""
            result = json.pop(name, [])
            if allow_single_string and isinstance(result, str):
                result = [result]
            if is_list_of_string(result):
                return result
            raise exceptions.InvalidConfiguration(
                f"Configuration field `{name}` is expected to be a list of "
                + f"strings but got `{result}`."
            )

        def ensure_list(json: Dict[str, object], name: str) -> List[object]:
            """Pop a field that must be a list (default [])."""
            result = json.pop(name, [])
            if isinstance(result, list):
                return result
            raise exceptions.InvalidConfiguration(
                f"Configuration field `{name}` is expected to be a list but got `{result}`."
            )

        try:
            configuration_json = json.loads(contents)
            dot_pyre_directory = ensure_option_type(
                configuration_json, "dot_pyre_directory", str
            )
            # `search_path` may be a single element or a list of elements.
            search_path_json = configuration_json.pop("search_path", [])
            if isinstance(search_path_json, list):
                search_path = [
                    search_path_module.create_raw_element(json)
                    for json in search_path_json
                ]
            else:
                search_path = [search_path_module.create_raw_element(search_path_json)]
            python_version_json = configuration_json.pop("python_version", None)
            if python_version_json is None:
                python_version = None
            elif isinstance(python_version_json, str):
                python_version = python_version_module.PythonVersion.from_string(
                    python_version_json
                )
            else:
                raise exceptions.InvalidConfiguration(
                    "Expect python version to be a string but got"
                    + f"'{python_version_json}'"
                )
            shared_memory_json = ensure_option_type(
                configuration_json, "shared_memory", dict
            )
            if shared_memory_json is None:
                shared_memory = shared_memory_module.SharedMemory()
            else:
                shared_memory = shared_memory_module.SharedMemory(
                    heap_size=ensure_option_type(shared_memory_json, "heap_size", int),
                    dependency_table_power=ensure_option_type(
                        shared_memory_json, "dependency_table_power", int
                    ),
                    hash_table_power=ensure_option_type(
                        shared_memory_json, "hash_table_power", int
                    ),
                )
                # Anything left in the sub-object was not consumed above.
                for unrecognized_key in shared_memory_json:
                    LOG.warning(f"Unrecognized configuration item: {unrecognized_key}")
            source_directories_json = ensure_option_type(
                configuration_json, "source_directories", list
            )
            if isinstance(source_directories_json, list):
                source_directories = [
                    search_path_module.create_raw_element(json)
                    for json in source_directories_json
                ]
            else:
                source_directories = None
            site_package_search_strategy_json = ensure_option_type(
                configuration_json, "site_package_search_strategy", str
            )
            if site_package_search_strategy_json is None:
                site_package_search_strategy = None
            else:
                site_package_search_strategy = site_packages.SearchStrategy.from_string(
                    site_package_search_strategy_json
                )
                if site_package_search_strategy is None:
                    raise exceptions.InvalidConfiguration(
                        "Invalid value for `site_package_search_strategy`: "
                        f"{site_package_search_strategy_json}. Available choices: "
                        f"{[str(x) for x in site_packages.SearchStrategy]}."
                    )
            ide_features_json = ensure_option_type(
                configuration_json, "ide_features", dict
            )
            if ide_features_json is None:
                ide_features = None
            else:
                ide_features = ide_features_module.IdeFeatures.create_from_json(
                    ide_features_json
                )
            unwatched_dependency_json = ensure_option_type(
                configuration_json, "unwatched_dependency", dict
            )
            if unwatched_dependency_json is None:
                unwatched_dependency = None
            else:
                unwatched_dependency = unwatched.UnwatchedDependency.from_json(
                    unwatched_dependency_json
                )
            partial_configuration = PartialConfiguration(
                binary=ensure_option_type(configuration_json, "binary", str),
                buck_mode=platform_aware.PlatformAware.from_json(
                    ensure_optional_string_or_string_dict(
                        configuration_json, "buck_mode"
                    ),
                    "buck_mode",
                ),
                do_not_ignore_errors_in=ensure_string_list(
                    configuration_json, "do_not_ignore_errors_in"
                ),
                dot_pyre_directory=Path(dot_pyre_directory)
                if dot_pyre_directory is not None
                else None,
                excludes=ensure_string_list(
                    configuration_json, "exclude", allow_single_string=True
                ),
                extensions=[
                    # pyre-fixme[6]: we did not fully verify the type of `json`
                    ExtensionElement.from_json(json)
                    for json in ensure_list(configuration_json, "extensions")
                ],
                ide_features=ide_features,
                ignore_all_errors=ensure_string_list(
                    configuration_json, "ignore_all_errors"
                ),
                isolation_prefix=ensure_option_type(
                    configuration_json, "isolation_prefix", str
                ),
                logger=ensure_option_type(configuration_json, "logger", str),
                number_of_workers=ensure_option_type(
                    configuration_json, "workers", int
                ),
                oncall=ensure_option_type(configuration_json, "oncall", str),
                other_critical_files=ensure_string_list(
                    configuration_json, "critical_files"
                ),
                pysa_version_hash=ensure_option_type(
                    configuration_json, "pysa_version", str
                ),
                python_version=python_version,
                search_path=search_path,
                shared_memory=shared_memory,
                site_package_search_strategy=site_package_search_strategy,
                site_roots=ensure_optional_string_list(
                    configuration_json, "site_roots"
                ),
                source_directories=source_directories,
                strict=ensure_option_type(configuration_json, "strict", bool),
                taint_models_path=ensure_string_list(
                    configuration_json, "taint_models_path", allow_single_string=True
                ),
                targets=ensure_optional_string_list(configuration_json, "targets"),
                typeshed=ensure_option_type(configuration_json, "typeshed", str),
                unwatched_dependency=unwatched_dependency,
                use_buck2=ensure_option_type(configuration_json, "use_buck2", bool),
                version_hash=ensure_option_type(configuration_json, "version", str),
            )

            # Check for deprecated and unused keys
            for (
                deprecated_key,
                replacement_key,
            ) in PartialConfiguration._get_depreacted_map().items():
                if deprecated_key in configuration_json:
                    configuration_json.pop(deprecated_key)
                    LOG.warning(
                        f"Configuration file uses deprecated item `{deprecated_key}`. "
                        f"Please migrate to its replacement `{replacement_key}`"
                    )
            extra_keys = PartialConfiguration._get_extra_keys()
            for unrecognized_key in configuration_json:
                if unrecognized_key not in extra_keys:
                    LOG.warning(f"Unrecognized configuration item: {unrecognized_key}")

            return partial_configuration
        except json.JSONDecodeError as error:
            raise exceptions.InvalidConfiguration(f"Invalid JSON file: {error}")

    @staticmethod
    def from_file(path: Path) -> "PartialConfiguration":
        """Read and parse a configuration file; wraps I/O failures in
        `InvalidConfiguration`."""
        try:
            contents = path.read_text(encoding="utf-8")
            return PartialConfiguration.from_string(contents)
        except OSError as error:
            raise exceptions.InvalidConfiguration(f"Error when reading {path}: {error}")

    def expand_relative_paths(self, root: str) -> "PartialConfiguration":
        """Return a copy of this layer with path-valued fields resolved
        relative to `root` (the directory the layer came from)."""
        binary = self.binary
        if binary is not None:
            binary = expand_relative_path(root, binary)
        logger = self.logger
        if logger is not None:
            logger = expand_relative_path(root, logger)
        source_directories = self.source_directories
        if source_directories is not None:
            source_directories = [
                path.expand_relative_root(root) for path in source_directories
            ]
        typeshed = self.typeshed
        if typeshed is not None:
            typeshed = expand_relative_path(root, typeshed)
        unwatched_dependency = self.unwatched_dependency
        if unwatched_dependency is not None:
            files = unwatched_dependency.files
            unwatched_dependency = unwatched.UnwatchedDependency(
                change_indicator=unwatched_dependency.change_indicator,
                files=unwatched.UnwatchedFiles(
                    root=expand_relative_path(root, files.root),
                    checksum_path=files.checksum_path,
                ),
            )
        return PartialConfiguration(
            binary=binary,
            buck_mode=self.buck_mode,
            do_not_ignore_errors_in=[
                expand_relative_path(root, path)
                for path in self.do_not_ignore_errors_in
            ],
            dot_pyre_directory=self.dot_pyre_directory,
            excludes=self.excludes,
            extensions=self.extensions,
            ide_features=self.ide_features,
            ignore_all_errors=[
                expand_relative_path(root, path) for path in self.ignore_all_errors
            ],
            isolation_prefix=self.isolation_prefix,
            logger=logger,
            number_of_workers=self.number_of_workers,
            oncall=self.oncall,
            other_critical_files=[
                expand_relative_path(root, path) for path in self.other_critical_files
            ],
            pysa_version_hash=self.pysa_version_hash,
            python_version=self.python_version,
            search_path=[path.expand_relative_root(root) for path in self.search_path],
            shared_memory=self.shared_memory,
            site_package_search_strategy=self.site_package_search_strategy,
            site_roots=self.site_roots,
            source_directories=source_directories,
            strict=self.strict,
            taint_models_path=[
                expand_relative_path(root, path) for path in self.taint_models_path
            ],
            targets=self.targets,
            typeshed=typeshed,
            unwatched_dependency=unwatched_dependency,
            use_buck2=self.use_buck2,
            version_hash=self.version_hash,
        )
def merge_partial_configurations(
    base: PartialConfiguration, override: PartialConfiguration
) -> PartialConfiguration:
    """Merge `override` on top of `base`, translating merge failures into
    `InvalidConfiguration` errors."""
    try:
        # pyre-ignore[16]: Pyre does not understand `dataclass_merge`
        merged = PartialConfiguration.merge(base, override)
    except dataclasses_merge.DataclassMergeError as error:
        raise exceptions.InvalidConfiguration(str(error))
    return merged
@dataclass(frozen=True)
class Configuration:
project_root: str
dot_pyre_directory: Path
binary: Optional[str] = None
buck_mode: Optional[platform_aware.PlatformAware[str]] = None
do_not_ignore_errors_in: Sequence[str] = field(default_factory=list)
excludes: Sequence[str] = field(default_factory=list)
extensions: Sequence[ExtensionElement] = field(default_factory=list)
ide_features: Optional[ide_features_module.IdeFeatures] = None
ignore_all_errors: Sequence[str] = field(default_factory=list)
isolation_prefix: Optional[str] = None
logger: Optional[str] = None
number_of_workers: Optional[int] = None
oncall: Optional[str] = None
other_critical_files: Sequence[str] = field(default_factory=list)
pysa_version_hash: Optional[str] = None
python_version: Optional[python_version_module.PythonVersion] = None
relative_local_root: Optional[str] = None
search_path: Sequence[search_path_module.RawElement] = field(default_factory=list)
shared_memory: shared_memory_module.SharedMemory = (
shared_memory_module.SharedMemory()
)
site_package_search_strategy: site_packages.SearchStrategy = (
site_packages.SearchStrategy.NONE
)
site_roots: Optional[Sequence[str]] = None
source_directories: Optional[Sequence[search_path_module.RawElement]] = None
strict: bool = False
taint_models_path: Sequence[str] = field(default_factory=list)
targets: Optional[Sequence[str]] = None
typeshed: Optional[str] = None
unwatched_dependency: Optional[unwatched.UnwatchedDependency] = None
use_buck2: bool = False
version_hash: Optional[str] = None
    @staticmethod
    def from_partial_configuration(
        project_root: Path,
        relative_local_root: Optional[str],
        partial_configuration: PartialConfiguration,
    ) -> "Configuration":
        """Freeze a fully merged `PartialConfiguration` into a `Configuration`.

        Unset optional fields are replaced by concrete defaults
        (`dot_pyre_directory`, `strict`, `use_buck2`,
        `site_package_search_strategy`), and raw search-path elements are
        expanded against the project root.
        """
        search_path = partial_configuration.search_path
        return Configuration(
            project_root=str(project_root),
            dot_pyre_directory=_get_optional_value(
                partial_configuration.dot_pyre_directory, project_root / LOG_DIRECTORY
            ),
            binary=partial_configuration.binary,
            buck_mode=partial_configuration.buck_mode,
            do_not_ignore_errors_in=partial_configuration.do_not_ignore_errors_in,
            excludes=partial_configuration.excludes,
            extensions=partial_configuration.extensions,
            ide_features=partial_configuration.ide_features,
            ignore_all_errors=partial_configuration.ignore_all_errors,
            isolation_prefix=partial_configuration.isolation_prefix,
            logger=partial_configuration.logger,
            number_of_workers=partial_configuration.number_of_workers,
            oncall=partial_configuration.oncall,
            other_critical_files=partial_configuration.other_critical_files,
            pysa_version_hash=partial_configuration.pysa_version_hash,
            python_version=partial_configuration.python_version,
            relative_local_root=relative_local_root,
            search_path=[
                path.expand_global_root(str(project_root)) for path in search_path
            ],
            shared_memory=partial_configuration.shared_memory,
            site_package_search_strategy=partial_configuration.site_package_search_strategy
            or site_packages.SearchStrategy.NONE,
            site_roots=partial_configuration.site_roots,
            source_directories=partial_configuration.source_directories,
            strict=_get_optional_value(partial_configuration.strict, default=False),
            taint_models_path=partial_configuration.taint_models_path,
            targets=partial_configuration.targets,
            typeshed=partial_configuration.typeshed,
            unwatched_dependency=partial_configuration.unwatched_dependency,
            use_buck2=_get_optional_value(
                partial_configuration.use_buck2, default=False
            ),
            version_hash=partial_configuration.version_hash,
        )
@property
def log_directory(self) -> str:
if self.relative_local_root is None:
return str(self.dot_pyre_directory)
return str(self.dot_pyre_directory / self.relative_local_root)
@property
def local_root(self) -> Optional[str]:
if self.relative_local_root is None:
return None
return os.path.join(self.project_root, self.relative_local_root)
    def to_json(self) -> Dict[str, object]:
        """
        This method is for display purpose only. Do *NOT* expect this method
        to produce JSONs that can be de-serialized back into configurations.

        Optional fields are omitted from the output entirely when unset
        (rather than emitted as null); `shared_memory` is omitted when it
        equals the all-defaults value.
        """
        binary = self.binary
        buck_mode = self.buck_mode
        isolation_prefix = self.isolation_prefix
        logger = self.logger
        number_of_workers = self.number_of_workers
        oncall = self.oncall
        pysa_version_hash = self.pysa_version_hash
        python_version = self.python_version
        relative_local_root = self.relative_local_root
        source_directories = self.source_directories
        site_package_search_strategy = self.site_package_search_strategy
        site_roots = self.site_roots
        targets = self.targets
        typeshed = self.typeshed
        unwatched_dependency = self.unwatched_dependency
        version_hash = self.version_hash
        return {
            "global_root": self.project_root,
            "dot_pyre_directory": str(self.dot_pyre_directory),
            **({"binary": binary} if binary is not None else {}),
            **({"buck_mode": buck_mode.to_json()} if buck_mode is not None else {}),
            "do_not_ignore_errors_in": list(self.do_not_ignore_errors_in),
            "excludes": list(self.excludes),
            "extensions": list(self.extensions),
            "ignore_all_errors": list(self.ignore_all_errors),
            **(
                {"isolation_prefix": isolation_prefix}
                if isolation_prefix is not None
                else {}
            ),
            **({"logger": logger} if logger is not None else {}),
            **({"oncall": oncall} if oncall is not None else {}),
            **({"workers": number_of_workers} if number_of_workers is not None else {}),
            "other_critical_files": list(self.other_critical_files),
            **(
                {"pysa_version_hash": pysa_version_hash}
                if pysa_version_hash is not None
                else {}
            ),
            **(
                {"python_version": python_version.to_string()}
                if python_version is not None
                else {}
            ),
            **(
                {"relative_local_root": relative_local_root}
                if relative_local_root is not None
                else {}
            ),
            "search_path": [str(path) for path in self.search_path],
            **(
                {"shared_memory": self.shared_memory.to_json()}
                if self.shared_memory != shared_memory_module.SharedMemory()
                else {}
            ),
            **(
                {"site_package_search_strategy": site_package_search_strategy}
                if site_package_search_strategy is not None
                else {}
            ),
            "site_roots": site_roots if site_roots is not None else [],
            **(
                {"source_directories": [str(path) for path in source_directories]}
                if source_directories is not None
                else {}
            ),
            "strict": self.strict,
            "taint_models_path": list(self.taint_models_path),
            **({"targets": list(targets)} if targets is not None else {}),
            **({"typeshed": typeshed} if typeshed is not None else {}),
            **(
                {"unwatched_dependency": unwatched_dependency.to_json()}
                if unwatched_dependency is not None
                else {}
            ),
            "use_buck2": self.use_buck2,
            **({"version_hash": version_hash} if version_hash is not None else {}),
        }
def get_existent_unwatched_dependency(
    self,
) -> Optional[unwatched.UnwatchedDependency]:
    """Return `self.unwatched_dependency` only when its root directory and
    checksum file both exist on disk right now; otherwise warn and return
    None (including on permission errors while probing the filesystem)."""
    dependency = self.unwatched_dependency
    if dependency is None:
        return None
    root = Path(dependency.files.root)
    try:
        if not root.is_dir():
            LOG.warning(
                "Nonexistent directory passed in to `unwatched_dependency`: "
                f"`{root}`"
            )
            return None
        checksum_file = root / dependency.files.checksum_path
        if not checksum_file.is_file():
            LOG.warning(
                "Nonexistent file passed in to `unwatched_dependency`: "
                f"`{checksum_file}`"
            )
            return None
    except PermissionError as error:
        LOG.warning(str(error))
        return None
    return self.unwatched_dependency
def get_site_roots(self) -> Sequence[str]:
    """Return the explicitly configured site roots, falling back to the
    platform defaults when none were configured."""
    if self.site_roots is not None:
        return self.site_roots
    return get_default_site_roots()
def expand_and_get_existent_search_paths(
    self,
) -> List[search_path_module.Element]:
    """Collect every existent search-path element: the configured raw
    elements, discovered site-package paths, and — when a typeshed can be
    located — the typeshed's own search paths."""
    site_roots = self.get_site_roots()
    elements = search_path_module.process_raw_elements(
        self.search_path, site_roots
    )
    elements = elements + site_packages.search_for_paths(
        self.site_package_search_strategy, site_roots
    )
    typeshed_root = self.get_typeshed_respecting_override()
    if typeshed_root is None:
        return elements
    typeshed_elements: List[search_path_module.Element] = [
        search_path_module.SimpleElement(str(path))
        for path in find_directories.find_typeshed_search_paths(
            Path(typeshed_root)
        )
    ]
    return elements + typeshed_elements
def expand_and_get_existent_source_directories(
    self,
) -> List[search_path_module.Element]:
    """Expand the configured source directories into existent search-path
    elements; an unconfigured value yields an empty list."""
    if self.source_directories is None:
        return []
    return search_path_module.process_raw_elements(
        self.source_directories, self.get_site_roots()
    )
def get_existent_do_not_ignore_errors_in_paths(self) -> List[str]:
    """
    Expand `do_not_ignore_errors_in` against the project root and keep only
    the paths that exist. This is a separate method because existence is
    checked at call time, not when the configuration is constructed.
    """
    existent = []
    for candidate in (
        expand_global_root(path, global_root=self.project_root)
        for path in self.do_not_ignore_errors_in
    ):
        if os.path.exists(candidate):
            existent.append(candidate)
        else:
            LOG.debug(
                "Filtering out nonexistent paths in `do_not_ignore_errors_in`: "
                f"{candidate}"
            )
    return existent
def get_existent_ignore_all_errors_paths(self) -> List[str]:
    """
    Expand `ignore_all_errors` and keep only paths that currently exist.
    This is a separate method because existence is checked at call time,
    not when the configuration is constructed.
    """
    return _expand_and_get_existent_ignore_all_errors_path(
        self.ignore_all_errors, self.project_root
    )
def get_binary_respecting_override(self) -> Optional[str]:
    """Return the configured binary path; otherwise look for `BINARY_NAME`
    on PATH, then next to the running script. None when nothing is found."""
    if self.binary is not None:
        return self.binary
    LOG.info(f"No binary specified, looking for `{BINARY_NAME}` in PATH")
    candidate = shutil.which(BINARY_NAME)
    if candidate is None:
        # Fall back to a binary shipped alongside the entry-point script.
        candidate = shutil.which(
            os.path.join(os.path.dirname(sys.argv[0]), BINARY_NAME)
        )
    return candidate
def get_typeshed_respecting_override(self) -> Optional[str]:
    """Return the configured typeshed path, or attempt to auto-locate one,
    warning (and returning None) when no suitable typeshed exists."""
    if self.typeshed is not None:
        return self.typeshed
    LOG.info("No typeshed specified, looking for it...")
    found = find_directories.find_typeshed()
    if found is None:
        LOG.warning(
            "Could not find a suitable typeshed. Types for Python builtins "
            "and standard libraries may be missing!"
        )
        return None
    LOG.info(f"Found: `{found}`")
    return str(found)
def get_version_hash_respecting_override(self) -> Optional[str]:
    """Return the version hash, letting a non-empty PYRE_VERSION_HASH
    environment variable take precedence over the configured value."""
    override = os.getenv("PYRE_VERSION_HASH")
    if not override:
        return self.version_hash
    LOG.warning(f"Version hash overridden with `{override}`")
    return override
def get_binary_version(self) -> Optional[str]:
    """Run `<binary> -version` and return its stripped stdout, or None when
    no binary is available or the invocation exits non-zero."""
    binary = self.get_binary_respecting_override()
    if binary is None:
        return None
    result = subprocess.run(
        [binary, "-version"], stdout=subprocess.PIPE, universal_newlines=True
    )
    if result.returncode != 0:
        return None
    return result.stdout.strip()
def get_number_of_workers(self) -> int:
    """Return the configured worker count when positive; otherwise derive a
    default from the physical core count (cores - 1, floored at 1)."""
    configured = self.number_of_workers
    if configured is not None and configured > 0:
        return configured
    # pyre-fixme[28]: Unexpected keyword argument `logical`.
    physical_cores = psutil.cpu_count(logical=False)
    default = 1 if physical_cores is None else max(1, physical_cores - 1)
    LOG.info(
        "Could not determine the number of Pyre workers from configuration. "
        f"Auto-set the value to {default}."
    )
    if default <= 1:
        LOG.info(
            "Consider setting the `--sequential` flag instead when the number "
            "of parallel workers is not greater than 1."
        )
    return default
def is_hover_enabled(self) -> bool:
    """Whether IDE hover is enabled, using the library default when no IDE
    features are configured."""
    features = self.ide_features
    if features is not None:
        return features.is_hover_enabled()
    return ide_features_module.IdeFeatures.DEFAULT_HOVER_ENABLED
def is_go_to_definition_enabled(self) -> bool:
    """Whether go-to-definition is enabled, using the library default when
    no IDE features are configured."""
    features = self.ide_features
    if features is not None:
        return features.is_go_to_definition_enabled()
    return ide_features_module.IdeFeatures.DEFAULT_GO_TO_DEFINITION_ENABLED
def is_find_symbols_enabled(self) -> bool:
    """Whether find-symbols is enabled, using the library default when no
    IDE features are configured."""
    features = self.ide_features
    if features is not None:
        return features.is_find_symbols_enabled()
    return ide_features_module.IdeFeatures.DEFAULT_FIND_SYMBOLS_ENABLED
def get_valid_extension_suffixes(self) -> List[str]:
    """Return the command-line argument for every configured extension whose
    suffix starts with `.`, warning about (and dropping) malformed ones.

    Fixes the misspelled local `vaild_extensions` and puts the accepting
    branch first for readability; behavior is unchanged.
    """
    valid_extensions = []
    for extension in self.extensions:
        if extension.suffix.startswith("."):
            valid_extensions.append(extension.command_line_argument())
        else:
            LOG.warning(
                "Filtering out extension which does not start with `.`: "
                f"`{extension.suffix}`"
            )
    return valid_extensions
def get_isolation_prefix_respecting_override(self) -> Optional[str]:
    """We need this to disable an isolation prefix set in a configuration.
    Merely omitting the CLI flag would not disable the isolation prefix
    because we would just fall back to the configuration value.
    With this, we can pass `--isolation-prefix ''` as a CLI argument or
    override `isolation_prefix` as `""` in a local configuration."""
    prefix = self.isolation_prefix
    if prefix == "":
        return None
    return prefix
def get_python_version(self) -> python_version_module.PythonVersion:
    """Return the configured Python version, defaulting to the running
    interpreter's own version when none is configured."""
    configured = self.python_version
    if configured is not None:
        return configured
    info = sys.version_info
    return python_version_module.PythonVersion(
        major=info.major, minor=info.minor, micro=info.micro
    )
def create_configuration(
    arguments: command_arguments.CommandArguments, base_directory: Path
) -> Configuration:
    """Build a Configuration by merging, in increasing precedence, the
    global configuration file, the local configuration file (when found),
    and the command-line arguments, rooted under `base_directory`."""
    local_root_argument = arguments.local_configuration
    if local_root_argument is None:
        search_base = base_directory
    else:
        search_base = base_directory / local_root_argument
    found_root = find_directories.find_global_and_local_root(search_base)
    # If the local root was explicitly specified but does not exist, fail
    # instead of silently falling back to the current directory.
    if local_root_argument is not None:
        if found_root is None:
            raise exceptions.InvalidConfiguration(
                "A local configuration path was explicitly specified, but no"
                + f" {CONFIGURATION_FILE} file was found in {search_base}"
                + " or its parents."
            )
        if found_root.local_root is None:
            raise exceptions.InvalidConfiguration(
                "A local configuration path was explicitly specified, but no"
                + f" {LOCAL_CONFIGURATION_FILE} file was found in {search_base}"
                + " or its parents."
            )
    command_argument_configuration = PartialConfiguration.from_command_arguments(
        arguments
    ).expand_relative_paths(str(Path.cwd()))
    relative_local_root = None
    if found_root is None:
        # No configuration file anywhere: the CLI arguments are everything.
        project_root = Path.cwd()
        partial_configuration = command_argument_configuration
    else:
        project_root = found_root.global_root
        partial_configuration = PartialConfiguration.from_file(
            project_root / CONFIGURATION_FILE
        ).expand_relative_paths(str(project_root))
        local_root = found_root.local_root
        if local_root is not None:
            relative_local_root = get_relative_local_root(project_root, local_root)
            partial_configuration = merge_partial_configurations(
                base=partial_configuration,
                override=PartialConfiguration.from_file(
                    local_root / LOCAL_CONFIGURATION_FILE
                ).expand_relative_paths(str(local_root)),
            )
        partial_configuration = merge_partial_configurations(
            base=partial_configuration,
            override=command_argument_configuration,
        )
    return Configuration.from_partial_configuration(
        project_root, relative_local_root, partial_configuration
    )
def check_nested_local_configuration(configuration: Configuration) -> None:
    """
    Raises `InvalidConfiguration` if the check fails.
    """
    local_root = configuration.local_root
    if local_root is None:
        return

    def covers(child: Path, parent: Path) -> bool:
        # `parent` is the same directory as `child` or one of its ancestors.
        return parent == child or parent in child.parents

    # Walk upward from the local root's parent, looking for another local
    # configuration that encloses the current one.
    local_root_path = Path(local_root).resolve()
    current_directory = local_root_path.parent
    while True:
        found_root = find_directories.find_global_and_local_root(
            current_directory
        )
        if found_root is None:
            return
        nesting_local_root = found_root.local_root
        if nesting_local_root is None:
            return
        nesting_configuration = PartialConfiguration.from_file(
            nesting_local_root / LOCAL_CONFIGURATION_FILE
        ).expand_relative_paths(str(nesting_local_root))
        ignored_paths = _expand_and_get_existent_ignore_all_errors_path(
            nesting_configuration.ignore_all_errors, str(found_root.global_root)
        )
        if not any(
            covers(child=local_root_path, parent=Path(path))
            for path in ignored_paths
        ):
            raise exceptions.InvalidConfiguration(
                "Local configuration is nested under another local configuration at "
                f"`{nesting_local_root}`.\nPlease add `{local_root_path}` to the "
                "`ignore_all_errors` field of the parent, or combine the sources "
                "into a single configuration, or split the parent configuration to "
                "avoid inconsistent errors."
            )
        current_directory = nesting_local_root.parent
import hashlib
import math
import os
import signal
import sys
from glob import glob

from boto import connect_s3
from boto.s3.key import Key
from filechunkio import FileChunkIO
from gevent import sleep
from gevent.event import Event
from gevent.pool import Pool

from ufyr.decorators import retry
# Multipart chunk size in bytes (~80 MB) — the original comment claimed
# "10Mb", which did not match the value. Files at or below this size are
# uploaded in a single request; larger files use multipart upload.
CHUNK_SIZE = 80000000
pool = Pool(4)
s3 = connect_s3()
bucket = s3.get_bucket('i-haz-a-bucket')
# Set by the SIGTERM handler to request a graceful shutdown of the workers.
closure = Event()
def signal_handler(*args):
    '''
    Catch SIGTERM and translate it into a graceful closure: set the
    module-level `closure` event, which the upload loops poll before
    starting new work. (Converted from the Python-2 print statement.)
    '''
    print('SETTING CLOSURE', closure)
    closure.set()
def save_pid():
    '''Record this process's pid in /tmp/uploader.pid so an external
    supervisor can find and signal it later.'''
    pid = os.getpid()
    with open('/tmp/uploader.pid', 'w') as pid_file:
        pid_file.write(str(pid))
def do_upload(_file):
    '''
    Upload a single path, choosing single-shot vs multipart by file size.
    No-op when shutdown has been requested or the path is not a regular
    file.
    '''
    if closure.is_set():
        return
    if not os.path.isfile(_file):
        return
    if os.stat(_file).st_size <= CHUNK_SIZE:
        upload_file(_file)
    else:
        upload_large_file(_file)
def traverse_file_path(filepath):
    '''
    Recursively yield files under `filepath` that are not already present
    in the bucket. Ends early (yields nothing more) once a shutdown has
    been requested.
    '''
    if closure.is_set():
        # PEP 479: `raise StopIteration` inside a generator becomes a
        # RuntimeError on Python 3.7+; `return` is the correct way to end
        # the generator.
        return
    for entry in glob(os.path.join(filepath, '*')):
        if os.path.isfile(entry):
            # Skip files that already exist in the bucket. (The original
            # fell through to the directory branch for these and recursed
            # into a file path, which yielded nothing but wasted a glob.)
            if bucket.get_key(entry) is None:
                yield entry
        else:
            for nested in traverse_file_path(entry):
                yield nested
@retry(limit=3, interval=(90, 120))
def upload_file(filepath):
    '''
    Single-request upload for small files (straight out of the boto docs).
    Returns True when the uploaded object's etag matches the local md5 —
    S3 etags for non-multipart uploads are quoted md5 hex digests.
    (Converted from Python-2 print statements.)
    '''
    print('UPLOAD SMALL FILE', filepath)
    local_md5 = md5_from_file(filepath)
    key = Key(bucket, filepath)
    key.set_contents_from_filename(filepath)
    print('UPLOAD COMPLETE', filepath)
    return '"%s"' % local_md5 == key.etag
@retry(limit=3, interval=(90, 120))
def upload_large_file(filepath):
    '''
    Multipart upload for files larger than CHUNK_SIZE. Returns True when
    the resulting etag matches S3's multipart convention:
    "<md5 of the concatenated per-part md5 digests>-<part count>".

    Bug fix: the original computed `int(math.ceil(file_size/CHUNK_SIZE))`
    — floor under integer division — and compensated with
    `range(chunk_count + 1)`, which uploaded a spurious zero-byte part
    whenever file_size was an exact multiple of CHUNK_SIZE (and broke the
    etag part-count check). True-division ceil with `range(chunk_count)`
    uploads exactly the right parts.
    '''
    print('UPLOAD LARGE FILE', filepath)
    uploader = bucket.initiate_multipart_upload(filepath)
    digest_accumulator = b''
    parts_uploaded = 0
    try:
        file_size = os.stat(filepath).st_size
        chunk_count = int(math.ceil(file_size / float(CHUNK_SIZE)))
        for part_index in range(chunk_count):
            offset = CHUNK_SIZE * part_index
            length = min(CHUNK_SIZE, file_size - offset)
            with FileChunkIO(
                filepath, 'r', offset=offset, bytes=length
            ) as fp:
                # Hash the chunk first, then rewind so the same bytes are
                # uploaded as this part.
                digest_accumulator += hashlib.md5(fp.read()).digest()
                fp.seek(0)
                print(str((float(CHUNK_SIZE) * part_index / float(file_size)) * 100) + '% complete\r')
                uploader.upload_part_from_file(fp, part_num=part_index + 1)
            parts_uploaded += 1
        uploader.complete_upload()
        key = bucket.get_key(uploader.key_name)
        print('UPLOAD COMPLETE', filepath)
        return key.etag == '"%s-%d"' % (
            hashlib.md5(digest_accumulator).hexdigest(), parts_uploaded)
    except BaseException:
        # Abort the multipart upload so S3 does not keep the orphaned
        # parts around (they are billed until aborted), then re-raise.
        uploader.cancel_upload()
        raise
def md5_from_file(_file):
return hashlib.md5(_get_file_contents(_file)).hexdigest()
def _get_file_contents(_file):
with open(_file, 'rb') as f:
return f.read()
if __name__ == '__main__':
    save_pid()
    signal.signal(signal.SIGTERM, signal_handler)
    # Bug fix: `argv` was referenced without ever being imported, so this
    # entry point crashed with a NameError; use sys.argv explicitly.
    pool.map(do_upload, traverse_file_path(sys.argv[1]))
from glob import glob
from filechunkio import FileChunkIO
from boto import connect_s3
from boto.s3.key import Key
from gevent import sleep
from gevent.pool import Pool
from gevent.event import Event
from ufyr.decorators import retry
CHUNK_SIZE = 80000000 #10Mb
pool = Pool(4)
s3 = connect_s3()
bucket = s3.get_bucket('i-haz-a-bucket')
closure = Event()
def signal_handler(*args):
'''
Function container to catch SIGTERM and translate
it to a graceful closure
'''
print "SETTING CLOSURE", closure
closure.set()
def save_pid():
with open('/tmp/uploader.pid', 'w') as f:
f.write(str(os.getpid()))
def do_upload(_file):
'''
Given a filepath, upload it using the appropriate s3 method
'''
if closure.is_set():
return
if os.path.isfile(_file):
if os.stat(_file).st_size <= CHUNK_SIZE:
upload_file(_file)
else:
upload_large_file(_file)
def traverse_file_path(filepath):
'''
Given a filepath, yield files in that path.
Is recursive, will only return files.
'''
if closure.is_set():
raise StopIteration
files = glob(os.path.join(filepath, '*'))
for _file in files:
if os.path.isfile(_file) and bucket.get_key(_file) is None:
yield _file
else:
for __file in traverse_file_path(_file):
yield __file
@retry(limit=3, interval=(90, 120))
def upload_file(filepath):
'''
Simple upload - straight out of the boto docs
'''
print 'UPLOAD SMALL FILE', filepath
md5 = md5_from_file(filepath)
key = Key(bucket, filepath)
key.set_contents_from_filename(filepath)
print 'UPLOAD COMPLETE', filepath
return '"%s"'%md5 == key.etag
@retry(limit=3, interval=(90, 120))
def upload_large_file(filepath):
'''
Big upload - also straight out of the docs
'''
print 'UPLOAD LARGE FILE', filepath
uploader = bucket.initiate_multipart_upload(filepath)
hashval = b''
_i = 0
try:
file_size = os.stat(filepath).st_size
chunk_count = int(math.ceil(file_size/CHUNK_SIZE))
for i in range(chunk_count + 1):
offset = CHUNK_SIZE * i
_bytes = min(CHUNK_SIZE, file_size - offset)
with FileChunkIO(filepath, 'r', offset=offset, bytes=_bytes) as fp:
hashval += hashlib.md5(fp.read()).digest()
fp.seek(0)
print str((float(CHUNK_SIZE) * i / float(file_size))*100) + '% complete\r'
uploader.upload_part_from_file(fp, part_num=i+1)
_i += 1
uploader.complete_upload()
key = bucket.get_key(uploader.key_name)
print 'UPLOAD COMPLETE', filepath
return key.etag == '"%s-%d"'%(hashlib.md5(hashval).hexdigest(), _i)
except:
uploader.cancel_upload()
raise
def md5_from_file(_file):
return hashlib.md5(_get_file_contents(_file)).hexdigest()
def _get_file_contents(_file):
with open(_file, 'rb') as f:
return f.read()
if __name__ == '__main__':
save_pid()
signal.signal(signal.SIGTERM, signal_handler)
pool.map(do_upload, traverse_file_path(argv[1])) | 0.296043 | 0.101768 |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
FIN = './data/model/model_%d.npy'
FOUT = './results/rescaled_contacts.svg'
P = np.array([0.93,0.91])
S = np.array([0.72,0.81])
F = np.array([0.33,0.23])
#VAR = 4*(F*P*(1-P) + (1-F)*S*(1-S))
VAR = np.array([1,1])
mpl.rcParams['xtick.labelsize'] = 7
mpl.rcParams['ytick.labelsize'] = 5
def build_data(M, idx):
    """Assemble a 4x4 matrix whose rows are M[4][idx] ... M[1][idx] (top to
    bottom), then zero the trailing entries of rows 1-3 that are undefined
    for the coarser models."""
    D = np.zeros((4, 4))
    for row, model in zip(range(4), range(4, 0, -1)):
        D[row, :] = M[model][idx]
    D[1, 3:] = 0
    D[2, 2:] = 0
    D[3, 1:] = 0
    return D
def predicted(D):
    """Predict each row of D by rescaling its first row with the ratio of
    row sums, preserving D's zero pattern:

        R[i, j] = D[0, j] * rowsum(i) / rowsum(0),  R[D == 0] = 0

    Leftover debug prints of D and the scale vector were removed; the
    computation itself is unchanged.
    """
    scale = D.sum(axis=1)
    R = np.tile(D[0, :], (4, 1)) * scale[:, None] / scale[0]
    R[D == 0] = 0
    return R
def plot_rescale(ax, A, R, err, width=0.25, width2=0.23, labels=None, ylabel=[None, None, None]):
    """Plot empirical (A) vs predicted (R) contact counts for matrix rows
    1..3, one axis per row; `err` scales sqrt-style error bars."""
    empirical_err = np.sqrt(A * err)
    predicted_err = np.sqrt(R * err)
    for axis_index, row in enumerate(range(1, 4)):
        _plot_rescale(
            ax[axis_index],
            A[row, :],
            R[row, :],
            empirical_err[row, :],
            predicted_err[row, :],
            labels=labels,
            ylabel=ylabel[axis_index],
        )
def _plot_rescale(ax,data,pred,err1,err2,width=0.25,width2=0.23,labels=None,ylabel='#contacts'):
ind = np.arange(4)
#ax.grid(zorder=-1)
ax.bar(ind-0.5*width,data,yerr=err1,width=width,ecolor='black', capsize=1,label='Empirical')
ax.bar(ind+0.5*width,pred,yerr=err2,width=width2,ecolor='black', capsize=1,label='Predicted')
ax.set_xlim([-0.5,3.5])
ax.set_xticks(ind)
if labels: ax.set_xticklabels(labels)
ax.legend(fontsize=6)
ax.set_ylabel('# contacts',fontsize=7)
ax.set_ylabel('# contacts',fontsize=7)
if ylabel: ax.set_title(ylabel,fontsize=7)
def run(cfg, fout=None, source_data=None):
    """Load the four model matrices, compare empirical vs predicted contact
    counts for chemical synapses and gap junctions, and save the figure to
    FOUT."""
    M = dict([(i, np.load(FIN % i)) for i in range(1, 5)])
    print(VAR)
    for i in range(1, 5):
        print(f'M{i}', M[i])
    C = build_data(M, 1)
    RC = predicted(C)
    print('RC', RC)
    G = build_data(M, 2)
    RG = predicted(G)
    _label = ['$\mathbb{%s}^1$', '$\mathbb{%s}^2$', '$\mathbb{%s}^3$', '$\mathbb{%s}^4$']
    clabel = [fmt % 'C' for fmt in _label]
    glabel = [fmt % 'G' for fmt in _label]
    s = 'scaled by %s'
    ylabel = [s % '$\mathbb{M}^3$', s % '$\mathbb{M}^2$', s % '$\mathbb{M}^1$']
    fig, ax = plt.subplots(3, 2, figsize=(3.5, 4))
    plot_rescale(ax[:, 0], C, RC, VAR[0], labels=clabel, ylabel=ylabel)
    plot_rescale(ax[:, 1], G, RG, VAR[1], labels=glabel, ylabel=ylabel)
    plt.tight_layout()
    plt.savefig(FOUT)
    plt.show()
if __name__ == "__main__":
    # Script entry point; the positional argument is unused by run().
    run(1)
import matplotlib.pyplot as plt
import matplotlib as mpl
FIN = './data/model/model_%d.npy'
FOUT = './results/rescaled_contacts.svg'
P = np.array([0.93,0.91])
S = np.array([0.72,0.81])
F = np.array([0.33,0.23])
#VAR = 4*(F*P*(1-P) + (1-F)*S*(1-S))
VAR = np.array([1,1])
mpl.rcParams['xtick.labelsize'] = 7
mpl.rcParams['ytick.labelsize'] = 5
def build_data(M,idx):
D = np.zeros((4,4,))
for (i,j) in enumerate(range(4,0,-1)):
D[i,:] = M[j][idx]
D[1,3:] = 0
D[2,2:] = 0
D[3,1:] = 0
return D
def predicted(D):
scale = D.sum(axis=1)
print(D)
print('scale',scale)
print('scale',scale[:,None],scale[0])
R = np.tile(D[0,:],(4,1)) * scale[:,None] / scale[0]
R[D==0] = 0
return R
def plot_rescale(ax,A,R,err,width=0.25,width2=0.23,labels=None,ylabel=[None,None,None]):
aerr = np.sqrt(A*err)
rerr = np.sqrt(R*err)
for (j,i) in enumerate(range(1,4)):
#print(f'axis: {j}')
#print('actual',A[i,:])
#print('predicted',R[i,:])
#print('actual_err',aerr)
#print('pred_err',rerr)
_plot_rescale(ax[j],A[i,:],R[i,:],aerr[i,:],rerr[i,:],labels=labels,ylabel=ylabel[j])
def _plot_rescale(ax,data,pred,err1,err2,width=0.25,width2=0.23,labels=None,ylabel='#contacts'):
ind = np.arange(4)
#ax.grid(zorder=-1)
ax.bar(ind-0.5*width,data,yerr=err1,width=width,ecolor='black', capsize=1,label='Empirical')
ax.bar(ind+0.5*width,pred,yerr=err2,width=width2,ecolor='black', capsize=1,label='Predicted')
ax.set_xlim([-0.5,3.5])
ax.set_xticks(ind)
if labels: ax.set_xticklabels(labels)
ax.legend(fontsize=6)
ax.set_ylabel('# contacts',fontsize=7)
ax.set_ylabel('# contacts',fontsize=7)
if ylabel: ax.set_title(ylabel,fontsize=7)
def run(cfg,fout=None,source_data=None):
M = dict([(i,np.load(FIN%i)) for i in range(1,5)])
print(VAR)
#print('M3',M[3])
#print('M4',M[4])
for i in range(1,5): print(f'M{i}',M[i])
C = build_data(M,1)
RC = predicted(C)
print('RC',RC)
G = build_data(M,2)
RG = predicted(G)
_label = ['$\mathbb{%s}^1$','$\mathbb{%s}^2$','$\mathbb{%s}^3$','$\mathbb{%s}^4$']
clabel = [l%'C' for l in _label]
glabel = [l%'G' for l in _label]
#s = '# contacts scaled by %s'
s = 'scaled by %s'
ylabel = [s%'$\mathbb{M}^3$',s%'$\mathbb{M}^2$',s%'$\mathbb{M}^1$']
fig,ax = plt.subplots(3,2,figsize=(3.5,4))
plot_rescale(ax[:,0],C,RC,VAR[0],labels=clabel,ylabel=ylabel)
plot_rescale(ax[:,1],G,RG,VAR[1],labels=glabel,ylabel=ylabel)
#ax[0,0].set_title('Chemical synapses',fontsize=7)
#ax[0,1].set_title('Gap junctions',fontsize=7)
plt.tight_layout()
plt.savefig(FOUT)
plt.show()
if __name__=="__main__":
run(1) | 0.195057 | 0.413951 |
from collections import namedtuple
from operator import add, contains
from typing import AbstractSet, Callable, Tuple
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from ... import expressions as exp, frontend
from ...datalog import DatalogProgram, Fact, Implication
from ...exceptions import NeuroLangException, WrongArgumentsInPredicateError
from ...expression_walker import ExpressionBasicEvaluator
from ...regions import ExplicitVBR, SphericalVolume
from ...type_system import Unknown
from .. import query_resolution, query_resolution_expressions as qre
def test_symbol_management():
    """Symbols can be created, typed, added, and looked up on the builder."""
    class Solver(DatalogProgram, ExpressionBasicEvaluator):
        pass

    neurolang = query_resolution.QueryBuilderBase(Solver())

    sym = neurolang.new_symbol(int)
    assert sym.expression.type is int
    sym_ = neurolang.new_symbol(type_=(float, int))
    assert sym_.expression.type is Tuple[float, int]
    assert sym.expression.name != sym_.expression.name

    a = neurolang.new_symbol(int, name="a")
    assert a.expression.name == "a"

    b = neurolang.add_symbol(1, name="b")
    assert "b" in neurolang.symbols
    assert b.value == 1
    assert b.type is int
    assert neurolang.symbols.b == b
    with pytest.raises(AttributeError):
        assert neurolang.symbols.c

    @neurolang.add_symbol
    def id(x: int) -> int:
        return x

    assert "id" in neurolang.symbols
    assert id == neurolang.symbols.id
    assert id == neurolang.symbols["id"]
    assert id.type == Callable[[int], int]

    f = neurolang.new_symbol()
    application = f(..., 1).expression
    assert isinstance(application, exp.FunctionApplication)
    assert application.functor == f.expression
    first_argument, second_argument = application.args
    assert isinstance(first_argument, exp.Symbol)
    assert first_argument.is_fresh
    assert isinstance(second_argument, exp.Constant)
    assert second_argument.value == 1
def test_symbol_environment():
    """Dynamic mode, `environment`, and `scope` control symbol visibility."""
    class Solver(DatalogProgram, ExpressionBasicEvaluator):
        pass

    neurolang = query_resolution.QueryBuilderBase(Solver())
    b = neurolang.add_symbol(1, name="b")

    neurolang.symbols._dynamic_mode = True
    assert "c" not in neurolang.symbols
    c = neurolang.symbols.c
    assert c.type is Unknown
    assert c.expression.name == "c"
    del neurolang.symbols.b
    assert b not in neurolang.symbols
    neurolang.symbols._dynamic_mode = False

    # `environment` persists new symbols on the builder.
    with neurolang.environment as env:
        assert "c" not in env
        c = env.c
        assert c.type is Unknown
        assert c.expression.name == "c"
        env.d = 5
        assert env.d.value == 5
        assert env.d.type is int
        assert neurolang.symbols.d.value == 5
        assert neurolang.symbols.d.type is int

    # `scope` discards its symbols on exit.
    with neurolang.scope as scope:
        assert "f" not in scope
        f = scope.f
        assert f.type is Unknown
        assert f.expression.name == "f"
        scope.g = 5
        assert scope.g.value == 5
        assert scope.g.type is int
    assert "f" not in neurolang.symbols
    assert "g" not in neurolang.symbols
def test_add_set():
    """Tuple sets can be built from ranges, frozensets, and DataFrames."""
    neurolang = frontend.NeurolangDL()

    integers = neurolang.add_tuple_set(range(10), int)
    resolved = neurolang[integers]
    assert integers.type is AbstractSet[int]
    assert resolved.type is AbstractSet[int]
    assert resolved.value == frozenset((i,) for i in range(10))
    assert isinstance(repr(resolved), str)

    pairs = frozenset(zip(("a", "b", "c"), range(3)))
    typed = neurolang.add_tuple_set(pairs, (str, int))
    resolved = neurolang[typed]
    assert typed.type is AbstractSet[Tuple[str, int]]
    assert resolved.type is AbstractSet[Tuple[str, int]]
    assert resolved.value == pairs
    assert isinstance(repr(resolved), str)

    frame = pd.DataFrame([[0, 's', .1], [1, 'p', .3]], columns=['A', 'B', 'C'])
    from_frame = neurolang.add_tuple_set(frame)
    resolved = neurolang[from_frame]
    assert from_frame.type is AbstractSet[Tuple[int, str, float]]
    assert resolved.type is AbstractSet[Tuple[int, str, float]]
    assert resolved.value == set(frame.itertuples(index=False))
    assert isinstance(repr(resolved), str)
def test_add_set_neurolangdl():
    """NeurolangDL tuple sets resolve with the declared element types."""
    neurolang = frontend.NeurolangDL()

    integers = neurolang.add_tuple_set(range(10), int)
    resolved = neurolang[integers]
    assert integers.type is AbstractSet[int]
    assert resolved.type is AbstractSet[int]
    assert resolved.value == frozenset((i,) for i in range(10))

    pairs = frozenset(zip(("a", "b", "c"), range(3)))
    typed = neurolang.add_tuple_set(pairs, (str, int))
    resolved = neurolang[typed]
    assert typed.type is AbstractSet[Tuple[str, int]]
    assert resolved.type is AbstractSet[Tuple[str, int]]
    assert resolved.value == pairs
def test_query_regions_from_region_set():
    """Every region inferior to `central` is returned by the query."""
    neurolang = frontend.NeurolangDL()
    central = ExplicitVBR(np.array([[0, 0, 5], [1, 1, 8]]), np.eye(4))
    lower = [
        ExplicitVBR(np.array([[0, 0, 2], [1, 1, 3]]), np.eye(4)),
        ExplicitVBR(np.array([[0, 0, -1], [1, 1, 2]]), np.eye(4)),
        ExplicitVBR(np.array([[0, 0, -10], [1, 1, -5]]), np.eye(4)),
    ]
    regions = neurolang.add_tuple_set({(region,) for region in lower})
    x = neurolang.new_region_symbol(name="x")
    query_result = neurolang.query(
        (x,), regions(x) & neurolang.symbols.inferior_of(x, central)
    )
    assert len(query_result) == len(regions)
    assert query_result.to_unnamed() == {(region,) for region in lower}
def test_query_new_predicate():
    """A Python-level predicate combining two builtins works in a query."""
    neurolang = frontend.NeurolangDL()
    central = ExplicitVBR(np.array([[0, 0, 5], [1, 1, 8]]), np.eye(4))
    inferior_posterior = ExplicitVBR(
        np.array([[0, -10, -10], [1, -5, -5]]), np.eye(4)
    )
    inferior_central = ExplicitVBR(
        np.array([[0, 0, -1], [1, 1, 2]]), np.eye(4)
    )
    inferior_anterior = ExplicitVBR(
        np.array([[0, 2, 2], [1, 5, 3]]), np.eye(4)
    )
    regions = neurolang.add_tuple_set(
        {(inferior_posterior,), (inferior_central,), (inferior_anterior,)}
    )

    def posterior_and_inferior(y, z):
        # Conjunction of two built-in anatomical relations.
        return neurolang.symbols.anatomical_posterior_of(
            y, z
        ) & neurolang.symbols.anatomical_inferior_of(y, z)

    x = neurolang.new_region_symbol(name="x")
    query_result = neurolang.query(
        (x,), regions(x) & posterior_and_inferior(x, central)
    )
    assert len(query_result) == 1
    assert next(iter(query_result)) == (inferior_posterior,)
def test_query_single_symbol():
    """A single-variable head may be given without a tuple wrapper."""
    neurolang = frontend.NeurolangDL()
    values = neurolang.add_tuple_set([(i,) for i in range(5)])
    with neurolang.scope as e:
        e.q[e.x] = values(e.x)
        result = neurolang.query(e.x, e.q(e.x))
    assert result.to_unnamed() == {(i,) for i in range(5)}
def test_query_wrong_head_arguments():
    """Querying with a head arity that does not match raises an error."""
    neurolang = frontend.NeurolangDL()
    pairs = neurolang.add_tuple_set([(i, i) for i in range(5)])
    with neurolang.scope as e:
        e.q[e.x, e.y] = pairs(e.x, e.y)
        with pytest.raises(WrongArgumentsInPredicateError):
            neurolang.query((e.x, e.y, e.z), e.q(e.x, e.y, e.z))
        with pytest.raises(NeuroLangException):
            neurolang.query((e.x, e.y, e.z), e.q(e.x, e.y))
@pytest.mark.skip()
def test_load_spherical_volume_first_order():
    """Spheres interact with explicit regions through `overlapping`."""
    neurolang = frontend.RegionFrontend()
    inferior = ExplicitVBR(np.array([[0, 0, 0], [1, 1, 1]]), np.eye(4))
    neurolang.add_region(inferior, name="inferior_region")
    neurolang.sphere((0, 0, 0), 0.5, name="unit_sphere")
    assert neurolang.symbols["unit_sphere"].value == SphericalVolume(
        (0, 0, 0), 0.5
    )

    x = neurolang.new_region_symbol(name="x")
    query = neurolang.query(
        x, neurolang.symbols.overlapping(x, neurolang.symbols.unit_sphere)
    )
    result = query.do()
    assert len(result.value) == 1
    assert next(iter(result.value)) == inferior

    # After making implicit regions explicit, the sphere becomes an
    # ExplicitVBR and the query still finds the same region.
    neurolang.make_implicit_regions_explicit(np.eye(4), (500, 500, 500))
    query = neurolang.query(
        x, neurolang.symbols.overlapping(x, neurolang.symbols.unit_sphere)
    )
    result = query.do()
    sphere_constant = neurolang.symbols["unit_sphere"].value
    assert isinstance(sphere_constant, ExplicitVBR)
    assert np.array_equal(sphere_constant.affine, np.eye(4))
    assert sphere_constant.image_dim == (500, 500, 500)
    assert np.array_equal(sphere_constant.voxels, [[0, 0, 0]])
    assert len(result.value) == 1
    assert next(iter(result.value)) == inferior
def test_load_spherical_volume_datalog():
    """Spheres can be made explicit and queried against a region relation."""
    neurolang = frontend.NeurolangDL()
    inferior = ExplicitVBR(np.array([[0, 0, 0], [1, 1, 1]]), np.eye(4))
    regions = neurolang.add_tuple_set(
        {(inferior, "inferior_region")}, name="regions"
    )
    neurolang.sphere((0, 0, 0), 0.5, name="unit_sphere")
    assert neurolang.symbols["unit_sphere"].value == SphericalVolume(
        (0, 0, 0), 0.5
    )

    q = neurolang.new_symbol()
    x = neurolang.new_region_symbol(name="x")
    n = neurolang.new_region_symbol(name="n")
    query = neurolang.query(
        q(x),
        neurolang.symbols.overlapping(x, neurolang.symbols.unit_sphere)
        & regions(x, n),
    )
    assert len(query.value) == 1
    assert next(iter(query.value))[0] == inferior

    # After making implicit regions explicit, the same query still matches
    # and the sphere symbol holds an ExplicitVBR.
    neurolang.make_implicit_regions_explicit(np.eye(4), (500, 500, 500))
    query = neurolang.query(
        q(x),
        neurolang.symbols.overlapping(x, neurolang.symbols.unit_sphere)
        & regions(x, n),
    )
    assert len(query.value) == 1
    assert next(iter(query.value))[0] == inferior
    sphere_constant = neurolang.symbols["unit_sphere"].value
    assert isinstance(sphere_constant, ExplicitVBR)
    assert np.array_equal(sphere_constant.affine, np.eye(4))
    assert sphere_constant.image_dim == (500, 500, 500)
    assert np.array_equal(sphere_constant.voxels, [[0, 0, 0]])
def test_neurolang_dl_query():
    """Direct queries and recursive rules over a doubling relation."""
    neurolang = frontend.NeurolangDL()
    r = neurolang.new_symbol(name="r")
    x = neurolang.new_symbol(name="x")
    y = neurolang.new_symbol(name="y")
    z = neurolang.new_symbol(name="z")
    dataset = {(i, i * 2) for i in range(10)}
    q = neurolang.add_tuple_set(dataset, name="q")

    assert neurolang.query((x, y), q(x, y)).to_unnamed() == dataset

    # Boolean queries: q(x, x) is satisfiable (x = 0), q(100, x) is not.
    assert neurolang.query(tuple(), q(x, x))
    assert neurolang.query(q(x, x))
    assert not neurolang.query(tuple(), q(100, x))
    assert not neurolang.query(q(100, x))

    join = neurolang.query((x,), q(x, y) & q(y, z)).to_unnamed()
    assert join == {(value,) for value in range(5)}

    # Recursive rule: r is the transitive closure of q.
    r[x, y] = q(x, y)
    r[x, z] = r[x, y] & q(y, z)
    reachable = neurolang.query((y,), r(1, y)).to_unnamed()
    assert reachable == {(value,) for value in (2, 4, 8, 16)}
def test_neurolang_dl_solve_all():
    """solve_all returns every extensional and intensional relation."""
    neurolang = frontend.NeurolangDL()
    r = neurolang.new_symbol(name="r")
    x = neurolang.new_symbol(name="x")
    dataset = {(i, i * 2) for i in range(10)}
    q = neurolang.add_tuple_set(dataset, name="q")
    r[x] = q(x, x)
    solution = neurolang.solve_all()
    assert len(solution) == 2
    assert solution["q"].to_unnamed() == dataset
    assert solution["r"].to_unnamed() == {(i,) for i, j in dataset if i == j}
    assert neurolang.predicate_parameter_names(r) == ("x",)
def test_neurolange_dl_get_param_names():
    """Parameter names and help text are exposed for predicates/symbols."""
    neurolang = frontend.NeurolangDL()
    r = neurolang.new_symbol(name="r")
    x = neurolang.new_symbol(name="x")
    q = neurolang.add_tuple_set({(i, i * 2) for i in range(10)}, name="q")
    r[x] = q(x, x)

    @neurolang.add_symbol
    def test_fun(x: int) -> int:
        """
        HELP TEST
        """
        return 0

    assert neurolang.predicate_parameter_names("q") == ("0", "1")
    assert neurolang.predicate_parameter_names(q) == ("0", "1")
    assert neurolang.predicate_parameter_names(r) == ("x",)
    assert neurolang.symbols[r].predicate_parameter_names == ("x",)
    assert r[x].help() is not None
    assert neurolang.symbols["test_fun"].help().strip() == "HELP TEST"
def test_neurolange_dl_named_sets():
    """Solved relations carry column names and row types."""
    neurolang = frontend.NeurolangDL()
    r = neurolang.new_symbol(name="r")
    s = neurolang.new_symbol(name="s")
    x = neurolang.new_symbol(name="x")
    y = neurolang.new_symbol(name="y")
    dataset = {(i, i * 2) for i in range(10)}
    q = neurolang.add_tuple_set(dataset, name="q")
    r[x] = q(x, x)
    s[x, y] = q(x, x) & (y == x)
    solution = neurolang.solve_all()
    assert solution["r"].columns == ("x",)
    assert solution["r"].row_type == Tuple[int]
    assert solution["r"].to_unnamed() == {(i,) for i, j in dataset if i == j}
def test_neurolange_dl_negation():
    """Negated antecedents filter out the matching tuples."""
    neurolang = frontend.NeurolangDL()
    s = neurolang.new_symbol(name="s")
    x = neurolang.new_symbol(name="x")
    y = neurolang.new_symbol(name="y")
    dataset = {(i, i * 2) for i in range(10)}
    q = neurolang.add_tuple_set(dataset, name="q")
    s[x, y] = ~q(x, x) & q(x, y)
    solution = neurolang.solve_all()
    assert solution["s"].to_unnamed() == {(i, j) for i, j in dataset if i != j}
def test_neurolang_dl_datalog_code_list_symbols():
neurolang = frontend.NeurolangDL()
original_symbols = set(neurolang.symbols)
neurolang.execute_datalog_program(
"""
A(4, 5)
A(5, 6)
A(6, 5)
B(x,y) :- A(x, y)
B(x,y) :- B(x, z),A(z, y)
C(x) :- B(x, y), y == 5
D("x")
"""
)
assert set(neurolang.symbols) == {"A", "B", "C", "D"} | original_symbols
def test_neurolang_dl_datalog_code():
neurolang = frontend.NeurolangDL()
neurolang.execute_datalog_program(
"""
A(4, 5)
A(5, 6)
A(6, 5)
B(x,y) :- A(x, y)
B(x,y) :- B(x, z),A(z, y)
C(x) :- B(x, y), y == 5
D("x")
"""
)
res = neurolang.solve_all()
assert res["A"].row_type == Tuple[int, int]
assert res["A"].to_unnamed() == {(4, 5), (5, 6), (6, 5)}
assert res["B"].to_unnamed() == {
(4, 5),
(5, 6),
(6, 5),
(4, 6),
(5, 5),
(6, 6),
}
assert res["C"].to_unnamed() == {(4,), (5,), (6,)}
assert res["D"].to_unnamed() == {
("x",),
}
def test_neurolang_dl_aggregation():
neurolang = frontend.NeurolangDL()
q = neurolang.new_symbol(name="q")
p = neurolang.new_symbol(name="p")
r = neurolang.new_symbol(name="r")
x = neurolang.new_symbol(name="x")
y = neurolang.new_symbol(name="y")
@neurolang.add_symbol
def sum_(x):
return sum(x)
for i in range(10):
q[i % 2, i] = True
p[x, sum_(y)] = q[x, y]
sol = neurolang.query(r(x, y), p(x, y))
res_q = {(0, 2 + 4 + 6 + 8), (1, 1 + 3 + 5 + 7 + 9)}
assert len(sol) == 2
assert sol[r] == res_q
assert sol[p] == res_q
def test_neurolang_dl_aggregation_direct_query():
neurolang = frontend.NeurolangDL()
q = neurolang.new_symbol(name="q")
p = neurolang.new_symbol(name="p")
x = neurolang.new_symbol(name="x")
y = neurolang.new_symbol(name="y")
@neurolang.add_symbol
def sum_(x):
return sum(x)
for i in range(10):
q[i % 2, i] = True
p[x, sum_(y)] = q[x, y]
sol = neurolang.query((x, y), p(x, y)).to_unnamed()
res_q = {(0, 2 + 4 + 6 + 8), (1, 1 + 3 + 5 + 7 + 9)}
assert sol == res_q
def test_neurolang_dl_aggregation_environment():
neurolang = frontend.NeurolangDL()
@neurolang.add_symbol
def sum_(x):
return sum(x)
with neurolang.environment as e:
for i in range(10):
e.q[i % 2, i] = True
e.p[e.x, sum_(e.y)] = e.q[e.x, e.y]
sol = neurolang.query(e.r(e.x, e.y), e.p(e.x, e.y))
res_q = {(0, 2 + 4 + 6 + 8), (1, 1 + 3 + 5 + 7 + 9)}
assert len(sol) == 2
assert sol["r"] == res_q
assert sol["p"] == res_q
def test_neurolang_dl_aggregation_environment_direct_query():
neurolang = frontend.NeurolangDL()
@neurolang.add_symbol
def sum_(x):
return sum(x)
with neurolang.environment as e:
for i in range(10):
e.q[i % 2, i] = True
e.p[e.x, sum_(e.y)] = e.q[e.x, e.y]
sol = neurolang.query((e.x, e.y), e.p(e.x, e.y)).to_unnamed()
res_q = {(0, 2 + 4 + 6 + 8), (1, 1 + 3 + 5 + 7 + 9)}
assert sol == res_q
def test_aggregation_number_of_arrivals():
neurolang = frontend.NeurolangDL()
@neurolang.add_symbol
def agg_count(x) -> int:
return len(x)
with neurolang.environment as e:
e.A[0, 1] = True
e.A[1, 2] = True
e.A[2, 3] = True
e.reachable[e.x, e.y] = e.A[e.x, e.y]
e.reachable[e.x, e.y] = e.reachable[e.x, e.z] & e.A[e.z, e.y]
e.count_destinations[e.x, agg_count(e.y)] = e.reachable[e.x, e.y]
res = neurolang.query((e.x, e.c), e.count_destinations(e.x, e.c))
assert res.to_unnamed() == {(0, 3), (1, 2), (2, 1)}
def test_frontend_operators():
neurolang = frontend.NeurolangDL()
a = neurolang.new_symbol(name='a')
b = a + 1
assert isinstance(b, qre.Operation)
assert b.expression.functor == exp.Constant(add)
assert b.expression.args == (a.expression, exp.Constant(1))
c = 1 + a
assert isinstance(c, qre.Operation)
assert c.expression.functor == exp.Constant(add)
assert c.expression.args == (exp.Constant(1), a.expression)
def test_neurolang_dl_attribute_access():
neurolang = frontend.NeurolangDL()
one_element = namedtuple("t", ("x", "y"))(1, 2)
a = neurolang.add_tuple_set([(one_element,)], name="a")
with neurolang.scope as e:
e.q[e.x] = a[e.x]
e.r[e.y] = a[e.w] & (e.y == e.w.x)
res = neurolang.solve_all()
q = res["q"]
r = res["r"]
assert len(q) == 1
el = next(q.to_unnamed().itervalues())[0]
assert el == one_element
assert r.to_unnamed() == {(one_element.x,)}
def test_neurolang_dl_set_destroy():
neurolang = frontend.NeurolangDL()
contains_ = neurolang.add_symbol(contains)
a = neurolang.add_tuple_set([(frozenset((0, 1, 2)),)], name="a")
with neurolang.scope as e:
e.q[e.y] = a[e.x] & contains_(e.x, e.y)
res = neurolang.solve_all()
q = res["q"].to_unnamed()
assert len(q) == 3
assert set(q) == {(0,), (1,), (2,)}
@pytest.mark.skip
@patch(
"neurolang.frontend.neurosynth_utils."
"NeuroSynthHandler.ns_region_set_from_term"
)
def test_neurosynth_region(mock_ns_regions):
mock_ns_regions.return_value = {
ExplicitVBR(np.array([[1, 0, 0], [1, 1, 0]]), np.eye(4))
}
neurolang = frontend.NeurolangDL()
s = neurolang.load_neurosynth_term_regions(
"gambling", 10, "gambling_regions"
)
res = neurolang[s]
mock_ns_regions.assert_called()
assert res.type is AbstractSet[Tuple[ExplicitVBR]]
assert res.value == frozenset((t,) for t in mock_ns_regions.return_value)
def test_translate_expression_to_fronted_expression():
qr = frontend.NeurolangDL()
tr = qre.TranslateExpressionToFrontEndExpression(qr)
assert tr.walk(exp.Constant(1)) == 1
symbol_exp = exp.Symbol("a")
symbol_fe = tr.walk(symbol_exp)
assert symbol_fe.expression == symbol_exp
assert symbol_fe.query_builder == tr.query_builder
fa_exp = symbol_exp(exp.Constant(1))
fa_fe = symbol_fe(1)
fa_fe_tr = tr.walk(fa_exp)
assert fa_fe_tr.expression == fa_exp
assert fa_fe_tr == fa_fe
fact_exp = Fact(fa_exp)
fact_fe = tr.walk(fact_exp)
assert fact_fe.expression == fact_exp
assert fact_fe.consequent == fa_fe
imp_exp = Implication(
symbol_exp(exp.Symbol("x")), exp.Symbol("b")(exp.Symbol("x"))
)
imp_fe = tr.walk(imp_exp)
assert imp_fe.expression == imp_exp
assert imp_fe.consequent == tr.walk(imp_exp.consequent)
assert imp_fe.antecedent == tr.walk(imp_exp.antecedent)
def test_first_column_sugar_body_s():
qr = frontend.NeurolangDL()
qr.add_tuple_set({
('one', 1), ('two', 2)
}, name='dd')
with qr.scope as e:
e.s[e.x] = (e.x == e.y) & e.dd('one', e.y)
e.r[e.x] = (e.x == (e.dd.s['one']))
res_all = qr.solve_all()
assert res_all['r'] == res_all['s']
def test_first_column_sugar_head_s():
qr = frontend.NeurolangDL()
qr.add_tuple_set({
(1, 'one'), (2, 'two')
}, name='dd')
with qr.scope as e:
e.r.s['one'] = e.dd('one')
res_all = qr.solve_all()
assert set(res_all['r']) == {('one', 1)}
def test_head_constant():
qr = frontend.NeurolangDL()
qr.add_tuple_set({
(1,)
}, name='dd')
with qr.scope as e:
e.r['one', e.x] = e.dd(e.x)
res_all = qr.solve_all()
assert set(res_all['r']) == {('one', 1)} | neurolang/frontend/tests/test_frontend.py | from collections import namedtuple
from operator import add, contains
from typing import AbstractSet, Callable, Tuple
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from ... import expressions as exp, frontend
from ...datalog import DatalogProgram, Fact, Implication
from ...exceptions import NeuroLangException, WrongArgumentsInPredicateError
from ...expression_walker import ExpressionBasicEvaluator
from ...regions import ExplicitVBR, SphericalVolume
from ...type_system import Unknown
from .. import query_resolution, query_resolution_expressions as qre
def test_symbol_management():
class Solver(DatalogProgram, ExpressionBasicEvaluator):
pass
neurolang = query_resolution.QueryBuilderBase(Solver())
sym = neurolang.new_symbol(int)
assert sym.expression.type is int
sym_ = neurolang.new_symbol(type_=(float, int))
assert sym_.expression.type is Tuple[float, int]
assert sym.expression.name != sym_.expression.name
a = neurolang.new_symbol(int, name="a")
assert a.expression.name == "a"
b = neurolang.add_symbol(1, name="b")
assert "b" in neurolang.symbols
assert b.value == 1
assert b.type is int
assert neurolang.symbols.b == b
with pytest.raises(AttributeError):
assert neurolang.symbols.c
@neurolang.add_symbol
def id(x: int) -> int:
return x
assert "id" in neurolang.symbols
assert id == neurolang.symbols.id
assert id == neurolang.symbols["id"]
assert id.type == Callable[[int], int]
f = neurolang.new_symbol()
new_expression = f(..., 1)
assert isinstance(new_expression.expression, exp.FunctionApplication)
assert new_expression.expression.functor == f.expression
assert isinstance(new_expression.expression.args[0], exp.Symbol)
assert new_expression.expression.args[0].is_fresh
assert isinstance(new_expression.expression.args[1], exp.Constant)
assert new_expression.expression.args[1].value == 1
def test_symbol_environment():
class Solver(DatalogProgram, ExpressionBasicEvaluator):
pass
neurolang = query_resolution.QueryBuilderBase(Solver())
b = neurolang.add_symbol(1, name="b")
neurolang.symbols._dynamic_mode = True
assert "c" not in neurolang.symbols
c = neurolang.symbols.c
assert c.type is Unknown
assert c.expression.name == "c"
del neurolang.symbols.b
assert b not in neurolang.symbols
neurolang.symbols._dynamic_mode = False
with neurolang.environment as e:
assert "c" not in e
c = e.c
assert c.type is Unknown
assert c.expression.name == "c"
e.d = 5
assert e.d.value == 5
assert e.d.type is int
assert neurolang.symbols.d.value == 5
assert neurolang.symbols.d.type is int
with neurolang.scope as e:
assert "f" not in e
f = e.f
assert f.type is Unknown
assert f.expression.name == "f"
e.g = 5
assert e.g.value == 5
assert e.g.type is int
assert "f" not in neurolang.symbols
assert "g" not in neurolang.symbols
def test_add_set():
neurolang = frontend.NeurolangDL()
s = neurolang.add_tuple_set(range(10), int)
res = neurolang[s]
assert s.type is AbstractSet[int]
assert res.type is AbstractSet[int]
assert res.value == frozenset((i,) for i in range(10))
assert isinstance(repr(res), str)
v = frozenset(zip(("a", "b", "c"), range(3)))
s = neurolang.add_tuple_set(v, (str, int))
res = neurolang[s]
assert s.type is AbstractSet[Tuple[str, int]]
assert res.type is AbstractSet[Tuple[str, int]]
assert res.value == v
assert isinstance(repr(res), str)
v = pd.DataFrame([[0, 's', .1], [1, 'p', .3]], columns=['A', 'B', 'C'])
s = neurolang.add_tuple_set(v)
res = neurolang[s]
assert s.type is AbstractSet[Tuple[int, str, float]]
assert res.type is AbstractSet[Tuple[int, str, float]]
assert res.value == set(v.itertuples(index=False))
assert isinstance(repr(res), str)
def test_add_set_neurolangdl():
neurolang = frontend.NeurolangDL()
s = neurolang.add_tuple_set(range(10), int)
res = neurolang[s]
assert s.type is AbstractSet[int]
assert res.type is AbstractSet[int]
assert res.value == frozenset((i,) for i in range(10))
v = frozenset(zip(("a", "b", "c"), range(3)))
s = neurolang.add_tuple_set(v, (str, int))
res = neurolang[s]
assert s.type is AbstractSet[Tuple[str, int]]
assert res.type is AbstractSet[Tuple[str, int]]
assert res.value == v
def test_query_regions_from_region_set():
neurolang = frontend.NeurolangDL()
central = ExplicitVBR(np.array([[0, 0, 5], [1, 1, 8]]), np.eye(4))
i1 = ExplicitVBR(np.array([[0, 0, 2], [1, 1, 3]]), np.eye(4))
i2 = ExplicitVBR(np.array([[0, 0, -1], [1, 1, 2]]), np.eye(4))
i3 = ExplicitVBR(np.array([[0, 0, -10], [1, 1, -5]]), np.eye(4))
regions_ = {(i1,), (i2,), (i3,)}
regions = neurolang.add_tuple_set(regions_)
x = neurolang.new_region_symbol(name="x")
query_result = neurolang.query(
(x,), regions(x) & neurolang.symbols.inferior_of(x, central)
)
assert len(query_result) == len(regions)
assert query_result.to_unnamed() == {(i1,), (i2,), (i3,)}
def test_query_new_predicate():
neurolang = frontend.NeurolangDL()
central = ExplicitVBR(np.array([[0, 0, 5], [1, 1, 8]]), np.eye(4))
inferior_posterior = ExplicitVBR(
np.array([[0, -10, -10], [1, -5, -5]]), np.eye(4)
)
inferior_central = ExplicitVBR(
np.array([[0, 0, -1], [1, 1, 2]]), np.eye(4)
)
inferior_anterior = ExplicitVBR(
np.array([[0, 2, 2], [1, 5, 3]]), np.eye(4)
)
regions_ = {
(inferior_posterior,),
(inferior_central,),
(inferior_anterior,),
}
regions = neurolang.add_tuple_set(regions_)
def posterior_and_inferior(y, z):
return neurolang.symbols.anatomical_posterior_of(
y, z
) & neurolang.symbols.anatomical_inferior_of(y, z)
x = neurolang.new_region_symbol(name="x")
query_result = neurolang.query(
(x,), regions(x) & posterior_and_inferior(x, central)
)
assert len(query_result) == 1
assert next(iter(query_result)) == (inferior_posterior,)
def test_query_single_symbol():
neurolang = frontend.NeurolangDL()
s = neurolang.add_tuple_set(
[(i,) for i in range(5)]
)
with neurolang.scope as e:
e.q[e.x] = s(e.x)
res = neurolang.query(e.x, e.q(e.x))
assert res.to_unnamed() == {(i,) for i in range(5)}
def test_query_wrong_head_arguments():
neurolang = frontend.NeurolangDL()
s = neurolang.add_tuple_set(
[(i, i) for i in range(5)]
)
with neurolang.scope as e:
e.q[e.x, e.y] = s(e.x, e.y)
with pytest.raises(WrongArgumentsInPredicateError):
neurolang.query((e.x, e.y, e.z), e.q(e.x, e.y, e.z))
with pytest.raises(NeuroLangException):
neurolang.query((e.x, e.y, e.z), e.q(e.x, e.y))
@pytest.mark.skip()
def test_load_spherical_volume_first_order():
neurolang = frontend.RegionFrontend()
inferior = ExplicitVBR(np.array([[0, 0, 0], [1, 1, 1]]), np.eye(4))
neurolang.add_region(inferior, name="inferior_region")
neurolang.sphere((0, 0, 0), 0.5, name="unit_sphere")
assert neurolang.symbols["unit_sphere"].value == SphericalVolume(
(0, 0, 0), 0.5
)
x = neurolang.new_region_symbol(name="x")
query = neurolang.query(
x, neurolang.symbols.overlapping(x, neurolang.symbols.unit_sphere)
)
query_result = query.do()
assert len(query_result.value) == 1
assert next(iter(query_result.value)) == inferior
neurolang.make_implicit_regions_explicit(np.eye(4), (500, 500, 500))
query = neurolang.query(
x, neurolang.symbols.overlapping(x, neurolang.symbols.unit_sphere)
)
query_result = query.do()
sphere_constant = neurolang.symbols["unit_sphere"].value
assert (
isinstance(sphere_constant, ExplicitVBR)
and np.array_equal(sphere_constant.affine, np.eye(4))
and sphere_constant.image_dim == (500, 500, 500)
and np.array_equal(sphere_constant.voxels, [[0, 0, 0]])
)
assert len(query_result.value) == 1
assert next(iter(query_result.value)) == inferior
def test_load_spherical_volume_datalog():
neurolang = frontend.NeurolangDL()
inferior = ExplicitVBR(np.array([[0, 0, 0], [1, 1, 1]]), np.eye(4))
regions = neurolang.add_tuple_set(
{(inferior, "inferior_region")}, name="regions"
)
neurolang.sphere((0, 0, 0), 0.5, name="unit_sphere")
assert neurolang.symbols["unit_sphere"].value == SphericalVolume(
(0, 0, 0), 0.5
)
q = neurolang.new_symbol()
x = neurolang.new_region_symbol(name="x")
n = neurolang.new_region_symbol(name="n")
query = neurolang.query(
q(x),
neurolang.symbols.overlapping(x, neurolang.symbols.unit_sphere)
& regions(x, n),
)
assert len(query.value) == 1
assert next(iter(query.value))[0] == inferior
neurolang.make_implicit_regions_explicit(np.eye(4), (500, 500, 500))
query = neurolang.query(
q(x),
neurolang.symbols.overlapping(x, neurolang.symbols.unit_sphere)
& regions(x, n),
)
assert len(query.value) == 1
assert next(iter(query.value))[0] == inferior
sphere_constant = neurolang.symbols["unit_sphere"].value
assert (
isinstance(sphere_constant, ExplicitVBR)
and np.array_equal(sphere_constant.affine, np.eye(4))
and sphere_constant.image_dim == (500, 500, 500)
and np.array_equal(sphere_constant.voxels, [[0, 0, 0]])
)
def test_neurolang_dl_query():
neurolang = frontend.NeurolangDL()
r = neurolang.new_symbol(name="r")
x = neurolang.new_symbol(name="x")
y = neurolang.new_symbol(name="y")
z = neurolang.new_symbol(name="z")
dataset = {(i, i * 2) for i in range(10)}
q = neurolang.add_tuple_set(dataset, name="q")
sol = neurolang.query((x, y), q(x, y)).to_unnamed()
assert sol == dataset
sol = neurolang.query(tuple(), q(x, x))
assert sol
assert neurolang.query(q(x, x))
sol = neurolang.query(tuple(), q(100, x))
assert not sol
assert not neurolang.query(q(100, x))
sol = neurolang.query((x,), q(x, y) & q(y, z)).to_unnamed()
res = set((x,) for x in range(5))
assert sol == res
r[x, y] = q(x, y)
r[x, z] = r[x, y] & q(y, z)
sol = neurolang.query((y,), r(1, y)).to_unnamed()
assert sol == set((x,) for x in (2, 4, 8, 16))
def test_neurolang_dl_solve_all():
neurolang = frontend.NeurolangDL()
r = neurolang.new_symbol(name="r")
x = neurolang.new_symbol(name="x")
dataset = {(i, i * 2) for i in range(10)}
q = neurolang.add_tuple_set(dataset, name="q")
r[x] = q(x, x)
sol = neurolang.solve_all()
assert sol["q"].to_unnamed() == dataset
assert sol["r"].to_unnamed() == set((i,) for i, j in dataset if i == j)
assert len(sol) == 2
assert neurolang.predicate_parameter_names(r) == ("x",)
def test_neurolange_dl_get_param_names():
neurolang = frontend.NeurolangDL()
r = neurolang.new_symbol(name="r")
x = neurolang.new_symbol(name="x")
dataset = {(i, i * 2) for i in range(10)}
q = neurolang.add_tuple_set(dataset, name="q")
r[x] = q(x, x)
@neurolang.add_symbol
def test_fun(x: int) -> int:
"""
HELP TEST
"""
return 0
assert neurolang.predicate_parameter_names("q") == ("0", "1")
assert neurolang.predicate_parameter_names(q) == ("0", "1")
assert neurolang.predicate_parameter_names(r) == ("x",)
assert neurolang.symbols[r].predicate_parameter_names == ("x",)
assert r[x].help() is not None
assert neurolang.symbols["test_fun"].help().strip() == "HELP TEST"
def test_neurolange_dl_named_sets():
neurolang = frontend.NeurolangDL()
r = neurolang.new_symbol(name="r")
s = neurolang.new_symbol(name="s")
x = neurolang.new_symbol(name="x")
y = neurolang.new_symbol(name="y")
dataset = {(i, i * 2) for i in range(10)}
q = neurolang.add_tuple_set(dataset, name="q")
r[x] = q(x, x)
s[x, y] = q(x, x) & (y == x)
res = neurolang.solve_all()
assert res["r"].columns == ("x",)
assert res["r"].row_type == Tuple[int]
assert res["r"].to_unnamed() == {(i,) for i, j in dataset if i == j}
def test_neurolange_dl_negation():
neurolang = frontend.NeurolangDL()
s = neurolang.new_symbol(name="s")
x = neurolang.new_symbol(name="x")
y = neurolang.new_symbol(name="y")
dataset = {(i, i * 2) for i in range(10)}
q = neurolang.add_tuple_set(dataset, name="q")
s[x, y] = ~q(x, x) & q(x, y)
res = neurolang.solve_all()
assert res["s"].to_unnamed() == {(i, j) for i, j in dataset if i != j}
def test_neurolang_dl_datalog_code_list_symbols():
neurolang = frontend.NeurolangDL()
original_symbols = set(neurolang.symbols)
neurolang.execute_datalog_program(
"""
A(4, 5)
A(5, 6)
A(6, 5)
B(x,y) :- A(x, y)
B(x,y) :- B(x, z),A(z, y)
C(x) :- B(x, y), y == 5
D("x")
"""
)
assert set(neurolang.symbols) == {"A", "B", "C", "D"} | original_symbols
def test_neurolang_dl_datalog_code():
neurolang = frontend.NeurolangDL()
neurolang.execute_datalog_program(
"""
A(4, 5)
A(5, 6)
A(6, 5)
B(x,y) :- A(x, y)
B(x,y) :- B(x, z),A(z, y)
C(x) :- B(x, y), y == 5
D("x")
"""
)
res = neurolang.solve_all()
assert res["A"].row_type == Tuple[int, int]
assert res["A"].to_unnamed() == {(4, 5), (5, 6), (6, 5)}
assert res["B"].to_unnamed() == {
(4, 5),
(5, 6),
(6, 5),
(4, 6),
(5, 5),
(6, 6),
}
assert res["C"].to_unnamed() == {(4,), (5,), (6,)}
assert res["D"].to_unnamed() == {
("x",),
}
def test_neurolang_dl_aggregation():
neurolang = frontend.NeurolangDL()
q = neurolang.new_symbol(name="q")
p = neurolang.new_symbol(name="p")
r = neurolang.new_symbol(name="r")
x = neurolang.new_symbol(name="x")
y = neurolang.new_symbol(name="y")
@neurolang.add_symbol
def sum_(x):
return sum(x)
for i in range(10):
q[i % 2, i] = True
p[x, sum_(y)] = q[x, y]
sol = neurolang.query(r(x, y), p(x, y))
res_q = {(0, 2 + 4 + 6 + 8), (1, 1 + 3 + 5 + 7 + 9)}
assert len(sol) == 2
assert sol[r] == res_q
assert sol[p] == res_q
def test_neurolang_dl_aggregation_direct_query():
neurolang = frontend.NeurolangDL()
q = neurolang.new_symbol(name="q")
p = neurolang.new_symbol(name="p")
x = neurolang.new_symbol(name="x")
y = neurolang.new_symbol(name="y")
@neurolang.add_symbol
def sum_(x):
return sum(x)
for i in range(10):
q[i % 2, i] = True
p[x, sum_(y)] = q[x, y]
sol = neurolang.query((x, y), p(x, y)).to_unnamed()
res_q = {(0, 2 + 4 + 6 + 8), (1, 1 + 3 + 5 + 7 + 9)}
assert sol == res_q
def test_neurolang_dl_aggregation_environment():
neurolang = frontend.NeurolangDL()
@neurolang.add_symbol
def sum_(x):
return sum(x)
with neurolang.environment as e:
for i in range(10):
e.q[i % 2, i] = True
e.p[e.x, sum_(e.y)] = e.q[e.x, e.y]
sol = neurolang.query(e.r(e.x, e.y), e.p(e.x, e.y))
res_q = {(0, 2 + 4 + 6 + 8), (1, 1 + 3 + 5 + 7 + 9)}
assert len(sol) == 2
assert sol["r"] == res_q
assert sol["p"] == res_q
def test_neurolang_dl_aggregation_environment_direct_query():
neurolang = frontend.NeurolangDL()
@neurolang.add_symbol
def sum_(x):
return sum(x)
with neurolang.environment as e:
for i in range(10):
e.q[i % 2, i] = True
e.p[e.x, sum_(e.y)] = e.q[e.x, e.y]
sol = neurolang.query((e.x, e.y), e.p(e.x, e.y)).to_unnamed()
res_q = {(0, 2 + 4 + 6 + 8), (1, 1 + 3 + 5 + 7 + 9)}
assert sol == res_q
def test_aggregation_number_of_arrivals():
neurolang = frontend.NeurolangDL()
@neurolang.add_symbol
def agg_count(x) -> int:
return len(x)
with neurolang.environment as e:
e.A[0, 1] = True
e.A[1, 2] = True
e.A[2, 3] = True
e.reachable[e.x, e.y] = e.A[e.x, e.y]
e.reachable[e.x, e.y] = e.reachable[e.x, e.z] & e.A[e.z, e.y]
e.count_destinations[e.x, agg_count(e.y)] = e.reachable[e.x, e.y]
res = neurolang.query((e.x, e.c), e.count_destinations(e.x, e.c))
assert res.to_unnamed() == {(0, 3), (1, 2), (2, 1)}
def test_frontend_operators():
neurolang = frontend.NeurolangDL()
a = neurolang.new_symbol(name='a')
b = a + 1
assert isinstance(b, qre.Operation)
assert b.expression.functor == exp.Constant(add)
assert b.expression.args == (a.expression, exp.Constant(1))
c = 1 + a
assert isinstance(c, qre.Operation)
assert c.expression.functor == exp.Constant(add)
assert c.expression.args == (exp.Constant(1), a.expression)
def test_neurolang_dl_attribute_access():
neurolang = frontend.NeurolangDL()
one_element = namedtuple("t", ("x", "y"))(1, 2)
a = neurolang.add_tuple_set([(one_element,)], name="a")
with neurolang.scope as e:
e.q[e.x] = a[e.x]
e.r[e.y] = a[e.w] & (e.y == e.w.x)
res = neurolang.solve_all()
q = res["q"]
r = res["r"]
assert len(q) == 1
el = next(q.to_unnamed().itervalues())[0]
assert el == one_element
assert r.to_unnamed() == {(one_element.x,)}
def test_neurolang_dl_set_destroy():
neurolang = frontend.NeurolangDL()
contains_ = neurolang.add_symbol(contains)
a = neurolang.add_tuple_set([(frozenset((0, 1, 2)),)], name="a")
with neurolang.scope as e:
e.q[e.y] = a[e.x] & contains_(e.x, e.y)
res = neurolang.solve_all()
q = res["q"].to_unnamed()
assert len(q) == 3
assert set(q) == {(0,), (1,), (2,)}
@pytest.mark.skip
@patch(
"neurolang.frontend.neurosynth_utils."
"NeuroSynthHandler.ns_region_set_from_term"
)
def test_neurosynth_region(mock_ns_regions):
mock_ns_regions.return_value = {
ExplicitVBR(np.array([[1, 0, 0], [1, 1, 0]]), np.eye(4))
}
neurolang = frontend.NeurolangDL()
s = neurolang.load_neurosynth_term_regions(
"gambling", 10, "gambling_regions"
)
res = neurolang[s]
mock_ns_regions.assert_called()
assert res.type is AbstractSet[Tuple[ExplicitVBR]]
assert res.value == frozenset((t,) for t in mock_ns_regions.return_value)
def test_translate_expression_to_fronted_expression():
qr = frontend.NeurolangDL()
tr = qre.TranslateExpressionToFrontEndExpression(qr)
assert tr.walk(exp.Constant(1)) == 1
symbol_exp = exp.Symbol("a")
symbol_fe = tr.walk(symbol_exp)
assert symbol_fe.expression == symbol_exp
assert symbol_fe.query_builder == tr.query_builder
fa_exp = symbol_exp(exp.Constant(1))
fa_fe = symbol_fe(1)
fa_fe_tr = tr.walk(fa_exp)
assert fa_fe_tr.expression == fa_exp
assert fa_fe_tr == fa_fe
fact_exp = Fact(fa_exp)
fact_fe = tr.walk(fact_exp)
assert fact_fe.expression == fact_exp
assert fact_fe.consequent == fa_fe
imp_exp = Implication(
symbol_exp(exp.Symbol("x")), exp.Symbol("b")(exp.Symbol("x"))
)
imp_fe = tr.walk(imp_exp)
assert imp_fe.expression == imp_exp
assert imp_fe.consequent == tr.walk(imp_exp.consequent)
assert imp_fe.antecedent == tr.walk(imp_exp.antecedent)
def test_first_column_sugar_body_s():
qr = frontend.NeurolangDL()
qr.add_tuple_set({
('one', 1), ('two', 2)
}, name='dd')
with qr.scope as e:
e.s[e.x] = (e.x == e.y) & e.dd('one', e.y)
e.r[e.x] = (e.x == (e.dd.s['one']))
res_all = qr.solve_all()
assert res_all['r'] == res_all['s']
def test_first_column_sugar_head_s():
qr = frontend.NeurolangDL()
qr.add_tuple_set({
(1, 'one'), (2, 'two')
}, name='dd')
with qr.scope as e:
e.r.s['one'] = e.dd('one')
res_all = qr.solve_all()
assert set(res_all['r']) == {('one', 1)}
def test_head_constant():
qr = frontend.NeurolangDL()
qr.add_tuple_set({
(1,)
}, name='dd')
with qr.scope as e:
e.r['one', e.x] = e.dd(e.x)
res_all = qr.solve_all()
assert set(res_all['r']) == {('one', 1)} | 0.847747 | 0.589864 |
import os
import numpy as np
import pandas as pd
from detectron2.data import DatasetCatalog, MetadataCatalog
def setup_cls_data_catalog(cfg):
"""
register datasetcatalog and metadata_catalog
"""
# register dataset_catalog
dataset_dict_fn = SpineClsDatasetFunction(cfg, "train")
DatasetCatalog.register("spine_cls_train", dataset_dict_fn)
dataset_dict_fn = SpineClsDatasetFunction(cfg, "test")
DatasetCatalog.register("spine_cls_test", dataset_dict_fn)
# register metadata_catalog
classes = cfg.MODEL.CLASSIFIER.CLASSES
MetadataCatalog.get("spine_cls_train").thing_classes = classes
MetadataCatalog.get("spine_cls_test").thing_classes = classes
print(
MetadataCatalog.get("spine_cls_train"), "\n",
MetadataCatalog.get("spine_cls_test")
)
class SpineClsDatasetFunction:
def __init__(self, cfg, mode="train"):
assert mode in ["train", "test"]
self.mode = mode
self.cfg = cfg
def __call__(self):
"""
return list[dict]
"""
cfg = self.cfg
disease_classes = cfg.MODEL.CLASSIFIER.CLASSES
data_folder = cfg.SPINE.TRAIN_DATA_FOLDER if self.mode == "train" else cfg.SPINE.TEST_DATA_FOLDER
metadata = cfg.SPINE.TRAIN_METADATA if self.mode == "train" else cfg.SPINE.TEST_METADATA
metadata = pd.read_csv(metadata)
metadata = metadata[["image_id", "image_height", "image_width"]]
metadata = metadata.set_index("image_id")
metadata = metadata.to_dict(orient="index")
annotations = cfg.SPINE.TRAIN_ANNOTATION if self.mode == "train" else cfg.SPINE.TEST_ANNOTATION
annotations = pd.read_csv(annotations)
abnormal_only = "Abnormal" in disease_classes
if abnormal_only:
assert len(disease_classes) == 1
dataset_dict = []
for image_id, rows in annotations.groupby("image_id"):
instance_dict = {}
instance_dict["file_name"] = os.path.join(data_folder, f"{image_id}.png")
instance_dict["image_id"] = image_id
instance_dict["height"] = metadata[image_id]["image_height"]
instance_dict["width"] = metadata[image_id]["image_width"]
classes = rows["lesion_type"].unique().tolist()
if abnormal_only:
label = 0. if "No finding" in classes else 1.
labels = np.array([label])
else:
labels = np.array([0.0]*len(disease_classes))
for label in classes:
if (label not in disease_classes) and "Other disease" in disease_classes:
label = "Other disease"
labels[disease_classes.index(label)] = 1.
instance_dict["classes"] = labels
dataset_dict.append(instance_dict)
return dataset_dict | spine/classification/cls_dataset_dict.py |
import os
import numpy as np
import pandas as pd
from detectron2.data import DatasetCatalog, MetadataCatalog
def setup_cls_data_catalog(cfg):
"""
register datasetcatalog and metadata_catalog
"""
# register dataset_catalog
dataset_dict_fn = SpineClsDatasetFunction(cfg, "train")
DatasetCatalog.register("spine_cls_train", dataset_dict_fn)
dataset_dict_fn = SpineClsDatasetFunction(cfg, "test")
DatasetCatalog.register("spine_cls_test", dataset_dict_fn)
# register metadata_catalog
classes = cfg.MODEL.CLASSIFIER.CLASSES
MetadataCatalog.get("spine_cls_train").thing_classes = classes
MetadataCatalog.get("spine_cls_test").thing_classes = classes
print(
MetadataCatalog.get("spine_cls_train"), "\n",
MetadataCatalog.get("spine_cls_test")
)
class SpineClsDatasetFunction:
def __init__(self, cfg, mode="train"):
assert mode in ["train", "test"]
self.mode = mode
self.cfg = cfg
def __call__(self):
"""
return list[dict]
"""
cfg = self.cfg
disease_classes = cfg.MODEL.CLASSIFIER.CLASSES
data_folder = cfg.SPINE.TRAIN_DATA_FOLDER if self.mode == "train" else cfg.SPINE.TEST_DATA_FOLDER
metadata = cfg.SPINE.TRAIN_METADATA if self.mode == "train" else cfg.SPINE.TEST_METADATA
metadata = pd.read_csv(metadata)
metadata = metadata[["image_id", "image_height", "image_width"]]
metadata = metadata.set_index("image_id")
metadata = metadata.to_dict(orient="index")
annotations = cfg.SPINE.TRAIN_ANNOTATION if self.mode == "train" else cfg.SPINE.TEST_ANNOTATION
annotations = pd.read_csv(annotations)
abnormal_only = "Abnormal" in disease_classes
if abnormal_only:
assert len(disease_classes) == 1
dataset_dict = []
for image_id, rows in annotations.groupby("image_id"):
instance_dict = {}
instance_dict["file_name"] = os.path.join(data_folder, f"{image_id}.png")
instance_dict["image_id"] = image_id
instance_dict["height"] = metadata[image_id]["image_height"]
instance_dict["width"] = metadata[image_id]["image_width"]
classes = rows["lesion_type"].unique().tolist()
if abnormal_only:
label = 0. if "No finding" in classes else 1.
labels = np.array([label])
else:
labels = np.array([0.0]*len(disease_classes))
for label in classes:
if (label not in disease_classes) and "Other disease" in disease_classes:
label = "Other disease"
labels[disease_classes.index(label)] = 1.
instance_dict["classes"] = labels
dataset_dict.append(instance_dict)
return dataset_dict | 0.479747 | 0.324423 |
import numpy as np
import glob
from scipy.interpolate import interp1d
import multiprocessing as mp
from bisect import bisect
def GetWaveNumbers(StartWavelength=300, EndWavelength=30000, Resolution=100000):
'''
Returns the wavelengths corresponding to the resolution and in units
of cm.
'''
WaveLengthValues = []
#Converting values to
StartWavelength = StartWavelength*1.0e-7 #nm to cm
EndWavelength = EndWavelength*1.0e-7 #nm to cm
WaveLengthValues = [StartWavelength]
while WaveLengthValues[-1]<EndWavelength:
WaveLengthValues.append(WaveLengthValues[-1]+WaveLengthValues[-1]/Resolution)
WaveLengthValues = np.array(WaveLengthValues)
WaveNumberRange = 1./WaveLengthValues
WaveNumberRange = np.array(sorted(WaveNumberRange))
return WaveLengthValues, WaveNumberRange
def BinModel(nu_HR,abs_HR,nu_Grid):
'''
This function takes a cross-section at high resolution:
nu_HR is the wavenumber in increasing order
abs_HR is the absorption cross-section in an increasing order
The stepsize in the WaveNumberGrid is not expected to be the equal
'''
InterpValues = np.zeros(len(nu_Grid))
Start = 0
i = 0
while i<len(nu_Grid):
StartIndex = bisect(nu_HR, Start)
StopIndex = bisect(nu_HR, nu_Grid[i])
InterpValues[i] = np.mean(abs_HR[StartIndex:StopIndex])
Start=nu_Grid[i]
i+=1
NanIndex = np.isnan(InterpValues)
InterpValues[NanIndex] = 0.0
return InterpValues
def SymplecticInterpolation(nu_HR,abs_HR,nu_Grid):
'''
This function takes a cross-section at high resolution:
nu_HR is the wavenumber in increasing order
abs_HR is the absorption cross-section in an increasing order
The stepsize in the WaveNumberGrid is not expected to be the equal
'''
#Assert the Wavenumber is strictly increasing
assert nu_HR[-1]>nu_HR[0], "The high resolution nu should also be strictly increasing."
assert nu_Grid[-1]>nu_Grid[0], "The low resolution wavenumber should also be strictly increasing."
InterpolatedValues = np.zeros(len(nu_Grid))
for i in range(len(nu_Grid)):
if i+1<len(nu_Grid):
StepSize= nu_Grid[i+1] - nu_Grid[i]
SelectIndex = np.abs(nu_HR-nu_Grid[i])<StepSize/2.0
InterpolatedValues[i] = np.mean(abs_HR[SelectIndex])
NanIndex = np.isnan(InterpolatedValues)
InterpolatedValues[NanIndex] = 0.0
return InterpolatedValues | lib/CrossSectionFunctions.py | import numpy as np
import glob
from scipy.interpolate import interp1d
import multiprocessing as mp
from bisect import bisect
def GetWaveNumbers(StartWavelength=300, EndWavelength=30000, Resolution=100000):
'''
Returns the wavelengths corresponding to the resolution and in units
of cm.
'''
WaveLengthValues = []
#Converting values to
StartWavelength = StartWavelength*1.0e-7 #nm to cm
EndWavelength = EndWavelength*1.0e-7 #nm to cm
WaveLengthValues = [StartWavelength]
while WaveLengthValues[-1]<EndWavelength:
WaveLengthValues.append(WaveLengthValues[-1]+WaveLengthValues[-1]/Resolution)
WaveLengthValues = np.array(WaveLengthValues)
WaveNumberRange = 1./WaveLengthValues
WaveNumberRange = np.array(sorted(WaveNumberRange))
return WaveLengthValues, WaveNumberRange
def BinModel(nu_HR,abs_HR,nu_Grid):
'''
This function takes a cross-section at high resolution:
nu_HR is the wavenumber in increasing order
abs_HR is the absorption cross-section in an increasing order
The stepsize in the WaveNumberGrid is not expected to be the equal
'''
InterpValues = np.zeros(len(nu_Grid))
Start = 0
i = 0
while i<len(nu_Grid):
StartIndex = bisect(nu_HR, Start)
StopIndex = bisect(nu_HR, nu_Grid[i])
InterpValues[i] = np.mean(abs_HR[StartIndex:StopIndex])
Start=nu_Grid[i]
i+=1
NanIndex = np.isnan(InterpValues)
InterpValues[NanIndex] = 0.0
return InterpValues
def SymplecticInterpolation(nu_HR,abs_HR,nu_Grid):
'''
This function takes a cross-section at high resolution:
nu_HR is the wavenumber in increasing order
abs_HR is the absorption cross-section in an increasing order
The stepsize in the WaveNumberGrid is not expected to be the equal
'''
#Assert the Wavenumber is strictly increasing
assert nu_HR[-1]>nu_HR[0], "The high resolution nu should also be strictly increasing."
assert nu_Grid[-1]>nu_Grid[0], "The low resolution wavenumber should also be strictly increasing."
InterpolatedValues = np.zeros(len(nu_Grid))
for i in range(len(nu_Grid)):
if i+1<len(nu_Grid):
StepSize= nu_Grid[i+1] - nu_Grid[i]
SelectIndex = np.abs(nu_HR-nu_Grid[i])<StepSize/2.0
InterpolatedValues[i] = np.mean(abs_HR[SelectIndex])
NanIndex = np.isnan(InterpolatedValues)
InterpolatedValues[NanIndex] = 0.0
return InterpolatedValues | 0.493897 | 0.494141 |
from os import path
signature = db.Table(db,'auth_signature',
Field('created_on','datetime',default=request.now,
writable=False,readable=False, label=T('Created on')),
Field('created_by','reference %s' % auth.settings.table_user_name,default=auth.user_id,
writable=False,readable=False, label=T('Created by')),
Field('modified_on','datetime',update=request.now,default=request.now,
writable=False,readable=False, label=T('Modified on')),
Field('modified_by','reference %s' % auth.settings.table_user_name,
default=auth.user_id,update=auth.user_id,
writable=False,readable=False, label=T('Modified by'))
)
db._common_fields.append(signature) #db._common_fields is a list of fields that should belong to all the tables
db.define_table('contact',
Field('name', label=T('Contact name')),
Field('trade_register_number', label=T('Trade register number')),
Field('description', 'text', label=T('Description')),
Field('address', 'text', label=T('Address')),
Field('google_maps_plan_url', 'text', label=T('Google maps plan url')),
Field('telephone', label=T('Telephone')),
Field('fax', label=T('Fax')),
Field('mobile', label=T('Mobile')),
Field('website', label=T('Website')),
Field('email', label=T('Email')),
Field('contact_form_email', label=T('Contact form email')),
Field('contact_form_cc', label=T('Contact form cc')),
Field('contact_form_bcc', label=T('Contact form cci')),
Field('show_in_address_component', 'boolean', default=True, label=T('Show in address component')),
Field('show_in_contact_form', 'boolean', default=True, label=T('Show in contact form'))
)
db.contact.website.requires = IS_EMPTY_OR(IS_URL())
db.contact.email.requires = IS_EMPTY_OR(IS_EMAIL())
db.contact.contact_form_email.requires = IS_EMPTY_OR(IS_EMAIL())
db.contact.contact_form_cc.requires = IS_EMPTY_OR(IS_EMAIL())
db.contact.contact_form_bcc.requires = IS_EMPTY_OR(IS_EMAIL())
db.define_table('website_parameters',
Field('last_fixture_date', 'date', label=T('Last fixture date'), comment=T('When last_fixture_date < current date, we apply the fixtures (see models/x_fixtures.py)')),
Field('website_name_long', label=T('Website name long'), comment=T('Shown in the banner footer')),
Field('website_name', label=T('Website name'), comment=T('Shown in top left logo')),
Field('website_title', label=T('Website title'), comment=T('Displayed instead of the banner if "with_banner"=False')),
Field('website_subtitle', label=T('Website subtitle'), comment=T('Shown in the banner footer')),
Field('website_url', label=T('Url'), comment=T('URL of the website')),
Field('force_language', label=T('Force a language (en, it, es, fr, ...)')),
Field('booking_form_email', label=T('Booking form email'), comment=T('Messages of the booking form will be sent to this email')),
Field('booking_form_cc', label=T('Booking form cc'), comment=T('Messages of the booking form will be cc to this email')),
Field('booking_form_bcc', label=T('Booking form cci'), comment=T('Messages of the booking form will be cci to this email')),
Field('max_old_news_to_show', 'integer',label=T('Max number of old news'), comment=T('How many old news (date < current date) shall we show?')),
Field('max_gallery_images_to_show', 'integer',label=T('Max number of images in gallery'), comment=T('How many maximum images shall we show in photo gallery?')),
Field('mailserver_url', label=T('Mail server url'), comment=T('URL of the mailserver (used to send email in forms)')),
Field('mailserver_port', 'integer', label=T('Mail server port'), comment=T('Port of the mailserver (used to send email in forms)')),
Field('mailserver_sender_mail', label=T('Mail server sender email'), comment=T('Sender email adress of the mailserver (used to send email in forms)')),
Field('mailserver_sender_login', label=T('Mail server sender login'), comment=T('Login of the mailserver (used to send email in forms)')),
Field('mailserver_sender_pass', label=T('Mail server sender pass'), comment=T('Pass of the mailserver (used to send email in forms)')),
Field('google_analytics_id', label=T('Google analytics id'), comment=T('Your Google Analytics account ID')),
Field('navbar_inverse', 'boolean', default=True, label=T('Inverse navbar color')),
Field('with_banner', 'boolean', default=True, label=T('Show a banner'), comment=T('If False, we display website_title instead')),
Field('with_specific_banner', 'boolean', label=T('Use the specific banner'), comment=T('Include the content of "views/specificbanner.html"')),
Field('add_website_name_as_logo', 'boolean', label=T('Add the website name as a logo'), comment=T('Will be displayed at the top left corner')),
Field('custom_bootstrap_css_file', label=T('Name of the custom bootstrap CSS file')),
Field('banner_image_always', label=T('Banner image always shown'), comment=T('URI of the image which will be always shown in the banner')),
Field('banner_image_desktop', label=T('Banner image shown on desktop mode'), comment=T('URI of the image which will be shown in the banner on desktop mode onlw')),
Field('banner_image_tablet', label=T('Banner image shown on tablet mode'), comment=T('URI of the image which will be shown in the banner on tablet mode only')),
Field('banner_image_phone', label=T('Banner image shown on phone mode'), comment=T('URI of the image which will be shown in the banner on phone mode only')),
Field('banner_image_background_gradient_from', label=T('Banner image background gradient from'), comment=T('Start color to display a gradient behind banner image')),
Field('banner_image_background_gradient_to', label=T('Banner image background gradient to'), comment=T('End color to display a gradient behind banner image')),
Field('seo_website_title', label=T('SEO : Website title'), comment=T('Displayed in <title> tag of the HTML source code')),
Field('seo_meta_author', label=T('SEO : Meta "author"'), comment=T('Displayed in <meta author> tag of the HTML source code')),
Field('seo_meta_description', label=T('SEO : Meta "description"'), comment=T('Displayed in <meta description> tag of the HTML source code')),
Field('seo_meta_keywords', label=T('SEO : Meta "keywords"'), comment=T('Displayed in <meta keywords> tag of the HTML source code')),
Field('seo_meta_generator', label=T('SEO : Meta "generator"'), comment=T('Displayed in <meta generator> tag of the HTML source code')),
Field('show_booking_menu', 'boolean', default=True, label=T('Show booking menu'), comment=T('Show the booking menu (to manage booking requests)')),
Field('show_event_menu', 'boolean', default=True, label=T('Show event menu'), comment=T('Show the event menu (to manage events on a calendar)')),
Field('disqus_shortname', default=True, label=T('Disqus shortname'), comment=T('Add here your disqus shortname to activate comments on your pages. Note : you need to fill "website_url" too!'))
)
db.website_parameters.website_url.requires = IS_EMPTY_OR(IS_URL())
db.website_parameters.mailserver_sender_mail.requires = IS_EMPTY_OR(IS_EMAIL())
db.website_parameters.booking_form_email.requires = IS_EMPTY_OR(IS_EMAIL())
db.website_parameters.booking_form_cc.requires = IS_EMPTY_OR(IS_EMAIL())
db.website_parameters.booking_form_bcc.requires = IS_EMPTY_OR(IS_EMAIL())
db.define_table('page_component',
Field('controller', readable=False, writable=False, default='default', label=T('Component controller')),
Field('name', unique=False, readable=False, writable=False, label=T('Component name')),
Field('description', readable=False, writable=False, label=T('Component description')),
Field('ajax', 'boolean', readable=False, writable=False, default=False, label=T('Component with Ajax')),
Field('ajax_trap', 'boolean', readable=False, writable=False, default=False, label=T('Component with Ajax trap')),
Field('container_class', readable=False, writable=False, label=T('Class of the container'), comment=T('For example "hidden-phone"')),
Field('parent', 'reference page_component', label=T('Parent')),
Field('rank', 'integer', readable=True, writable=True, default=0, label=T('Rank')),
)
db.page_component.parent.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(description)s', zero=T('<Empty>')))
db.define_table('page',
Field('parent', 'reference page', label=T('Parent')),
Field('title', unique=True, notnull=True, label=T('Title')),
Field('rank', 'integer', readable=True, writable=True, default=0, label=T('Rank')),
Field('subtitle', label=T('Subtitle')),
Field('url', unique=True, readable=True, writable=True, label=T('Url')),
Field('content', 'text', label=T('Content')),
Field('is_index', 'boolean', readable=True, writable=True, default=False, label=T('Is index')),
Field('is_enabled', 'boolean', readable=True, writable=True, default=True, label=T('Is enabled')),
Field('left_sidebar_enabled', 'boolean', default=False, label=T('Left sidebar')),
Field('right_sidebar_enabled', 'boolean', default=False, label=T('Right sidebar')),
Field('header_component', 'reference page_component', label=T('Header component')),
Field('left_sidebar_component', 'reference page_component', label=T('Left sidebar component')),
Field('right_sidebar_component', 'reference page_component', label=T('Right sidebar component')),
Field('left_footer_component', 'reference page_component', label=T('Left footer component')),
Field('middle_footer_component', 'reference page_component', label=T('Middle footer component')),
Field('right_footer_component', 'reference page_component', label=T('Right footer component')),
Field('central_component', 'reference page_component', label=T('Central component')),
Field('allow_disqus', 'boolean', label=T('Allow disqus (must be configured in website_parameters)')),
Field('max_content_height', 'integer', readable=True, writable=True, default=0, label=T('Max height (in pixels) of the page content (0 = no max height)')),
format='%(title)s'
)
db.page.parent.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page.id, '%(title)s', zero=T('<Empty>')))
db.page.header_component.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(name)s - %(description)s', zero=T('<Empty>')))
db.page.left_sidebar_component.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(name)s - %(description)s', zero=T('<Empty>')))
db.page.right_sidebar_component.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(name)s - %(description)s', zero=T('<Empty>')))
db.page.left_footer_component.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(name)s - %(description)s', zero=T('<Empty>')))
db.page.right_footer_component.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(name)s - %(description)s', zero=T('<Empty>')))
db.page.middle_footer_component.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(name)s - %(description)s', zero=T('<Empty>')))
db.page.central_component.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(name)s - %(description)s', zero=T('<Empty>')))
db.page.url.compute = lambda row: IS_SLUG()(row.title)[0]
pageSelector = HierarchicalSelect(db, db.page, db.page.title, db.page.rank)
db.page.parent.widget = pageSelector.widget
db.define_table('image',
Field('page', 'reference page', label=T('Page')),
Field('name', notnull=True, label=T('Name')),
Field('alt', label=T('Alt')),
Field('comment', label=T('Comment')),
Field('file', 'upload', uploadfolder=path.join(
request.folder,'static','images','photo_gallery'
), autodelete=True, label=T('File')),
Field('thumb', 'text', readable=False, writable=False, label=T('Thumb')),
Field('show_in_gallery', 'boolean', readable=True, writable=True, default=True, label=T('Show in gallery')),
Field('show_in_banner', 'boolean', readable=True, writable=True, default=False, label=T('Show in banner')),
format='%(name)s'
)
db.image.page.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page.id, '%(title)s', zero=T('<Empty>')))
db.image.alt.compute = lambda row: row.name.capitalize()
#db.image.page.widget = pageSelector.widget
db.define_table('registered_user',
Field('first_name', label=T('First name')),
Field('last_name', label=T('Last name')),
Field('email', unique=True, requires=[IS_EMAIL(), IS_NOT_IN_DB(db, 'registered_user.email')], label=T('Email')),
Field('subscribe_to_newsletter', 'boolean', default=True, label=T("User want to receive newsletter emails")),
format='%(email)s'
)
db.define_table('news',
Field('title', label=T('Title')),
Field('date','date',default=request.now,label=T('Date')),
Field('text','text',label=T('News content')),
Field('published_on', 'datetime', default=request.now),
Field('send_mail', 'boolean', readable=True, writable=True, default=False, label=T('Send an email to the registered users to inform them')),
Field('mail_sent', 'boolean', readable=False, writable=False, default=False, label=T('An email has been send to the registered users')),
Field('max_content_height', 'integer', readable=True, writable=True, default=0, label=T('Max height (in pixels) of the news content (0 = no max height)')),
format='%(text)s'
)
db.define_table('file',
Field('page', 'reference page', label=T('Page')),
Field('title', label=T('Title'), notnull=True),
Field('comment', label=T('Comment')),
Field('file', 'upload', uploadfolder=path.join(
request.folder,'static','uploaded_files'
), notnull=True, autodelete=True, label=T('File')),
Field('protected', 'boolean', readable=True, writable=True, default=False, label=T('Protected (visible only for authorized users)')),
Field('size', 'double', readable=False, writable=False, label=T('Size')),
format='%(title)s'
)
db.file.page.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page.id, '%(title)s', zero=T('<Empty>')))
db.file.size.compute = lambda row: path.getsize(path.join(request.folder,'static','uploaded_files',row.file))
#db.file.page.widget = pageSelector.widget
## after defining tables, uncomment below to enable auditing
auth.enable_record_versioning(db) | web2py-appliances-master/TinyWebsite/models/db__website.py | from os import path
signature = db.Table(db,'auth_signature',
Field('created_on','datetime',default=request.now,
writable=False,readable=False, label=T('Created on')),
Field('created_by','reference %s' % auth.settings.table_user_name,default=auth.user_id,
writable=False,readable=False, label=T('Created by')),
Field('modified_on','datetime',update=request.now,default=request.now,
writable=False,readable=False, label=T('Modified on')),
Field('modified_by','reference %s' % auth.settings.table_user_name,
default=auth.user_id,update=auth.user_id,
writable=False,readable=False, label=T('Modified by'))
)
db._common_fields.append(signature) #db._common_fields is a list of fields that should belong to all the tables
db.define_table('contact',
Field('name', label=T('Contact name')),
Field('trade_register_number', label=T('Trade register number')),
Field('description', 'text', label=T('Description')),
Field('address', 'text', label=T('Address')),
Field('google_maps_plan_url', 'text', label=T('Google maps plan url')),
Field('telephone', label=T('Telephone')),
Field('fax', label=T('Fax')),
Field('mobile', label=T('Mobile')),
Field('website', label=T('Website')),
Field('email', label=T('Email')),
Field('contact_form_email', label=T('Contact form email')),
Field('contact_form_cc', label=T('Contact form cc')),
Field('contact_form_bcc', label=T('Contact form cci')),
Field('show_in_address_component', 'boolean', default=True, label=T('Show in address component')),
Field('show_in_contact_form', 'boolean', default=True, label=T('Show in contact form'))
)
db.contact.website.requires = IS_EMPTY_OR(IS_URL())
db.contact.email.requires = IS_EMPTY_OR(IS_EMAIL())
db.contact.contact_form_email.requires = IS_EMPTY_OR(IS_EMAIL())
db.contact.contact_form_cc.requires = IS_EMPTY_OR(IS_EMAIL())
db.contact.contact_form_bcc.requires = IS_EMPTY_OR(IS_EMAIL())
db.define_table('website_parameters',
Field('last_fixture_date', 'date', label=T('Last fixture date'), comment=T('When last_fixture_date < current date, we apply the fixtures (see models/x_fixtures.py)')),
Field('website_name_long', label=T('Website name long'), comment=T('Shown in the banner footer')),
Field('website_name', label=T('Website name'), comment=T('Shown in top left logo')),
Field('website_title', label=T('Website title'), comment=T('Displayed instead of the banner if "with_banner"=False')),
Field('website_subtitle', label=T('Website subtitle'), comment=T('Shown in the banner footer')),
Field('website_url', label=T('Url'), comment=T('URL of the website')),
Field('force_language', label=T('Force a language (en, it, es, fr, ...)')),
Field('booking_form_email', label=T('Booking form email'), comment=T('Messages of the booking form will be sent to this email')),
Field('booking_form_cc', label=T('Booking form cc'), comment=T('Messages of the booking form will be cc to this email')),
Field('booking_form_bcc', label=T('Booking form cci'), comment=T('Messages of the booking form will be cci to this email')),
Field('max_old_news_to_show', 'integer',label=T('Max number of old news'), comment=T('How many old news (date < current date) shall we show?')),
Field('max_gallery_images_to_show', 'integer',label=T('Max number of images in gallery'), comment=T('How many maximum images shall we show in photo gallery?')),
Field('mailserver_url', label=T('Mail server url'), comment=T('URL of the mailserver (used to send email in forms)')),
Field('mailserver_port', 'integer', label=T('Mail server port'), comment=T('Port of the mailserver (used to send email in forms)')),
Field('mailserver_sender_mail', label=T('Mail server sender email'), comment=T('Sender email adress of the mailserver (used to send email in forms)')),
Field('mailserver_sender_login', label=T('Mail server sender login'), comment=T('Login of the mailserver (used to send email in forms)')),
Field('mailserver_sender_pass', label=T('Mail server sender pass'), comment=T('Pass of the mailserver (used to send email in forms)')),
Field('google_analytics_id', label=T('Google analytics id'), comment=T('Your Google Analytics account ID')),
Field('navbar_inverse', 'boolean', default=True, label=T('Inverse navbar color')),
Field('with_banner', 'boolean', default=True, label=T('Show a banner'), comment=T('If False, we display website_title instead')),
Field('with_specific_banner', 'boolean', label=T('Use the specific banner'), comment=T('Include the content of "views/specificbanner.html"')),
Field('add_website_name_as_logo', 'boolean', label=T('Add the website name as a logo'), comment=T('Will be displayed at the top left corner')),
Field('custom_bootstrap_css_file', label=T('Name of the custom bootstrap CSS file')),
Field('banner_image_always', label=T('Banner image always shown'), comment=T('URI of the image which will be always shown in the banner')),
Field('banner_image_desktop', label=T('Banner image shown on desktop mode'), comment=T('URI of the image which will be shown in the banner on desktop mode onlw')),
Field('banner_image_tablet', label=T('Banner image shown on tablet mode'), comment=T('URI of the image which will be shown in the banner on tablet mode only')),
Field('banner_image_phone', label=T('Banner image shown on phone mode'), comment=T('URI of the image which will be shown in the banner on phone mode only')),
Field('banner_image_background_gradient_from', label=T('Banner image background gradient from'), comment=T('Start color to display a gradient behind banner image')),
Field('banner_image_background_gradient_to', label=T('Banner image background gradient to'), comment=T('End color to display a gradient behind banner image')),
Field('seo_website_title', label=T('SEO : Website title'), comment=T('Displayed in <title> tag of the HTML source code')),
Field('seo_meta_author', label=T('SEO : Meta "author"'), comment=T('Displayed in <meta author> tag of the HTML source code')),
Field('seo_meta_description', label=T('SEO : Meta "description"'), comment=T('Displayed in <meta description> tag of the HTML source code')),
Field('seo_meta_keywords', label=T('SEO : Meta "keywords"'), comment=T('Displayed in <meta keywords> tag of the HTML source code')),
Field('seo_meta_generator', label=T('SEO : Meta "generator"'), comment=T('Displayed in <meta generator> tag of the HTML source code')),
Field('show_booking_menu', 'boolean', default=True, label=T('Show booking menu'), comment=T('Show the booking menu (to manage booking requests)')),
Field('show_event_menu', 'boolean', default=True, label=T('Show event menu'), comment=T('Show the event menu (to manage events on a calendar)')),
Field('disqus_shortname', default=True, label=T('Disqus shortname'), comment=T('Add here your disqus shortname to activate comments on your pages. Note : you need to fill "website_url" too!'))
)
db.website_parameters.website_url.requires = IS_EMPTY_OR(IS_URL())
db.website_parameters.mailserver_sender_mail.requires = IS_EMPTY_OR(IS_EMAIL())
db.website_parameters.booking_form_email.requires = IS_EMPTY_OR(IS_EMAIL())
db.website_parameters.booking_form_cc.requires = IS_EMPTY_OR(IS_EMAIL())
db.website_parameters.booking_form_bcc.requires = IS_EMPTY_OR(IS_EMAIL())
db.define_table('page_component',
Field('controller', readable=False, writable=False, default='default', label=T('Component controller')),
Field('name', unique=False, readable=False, writable=False, label=T('Component name')),
Field('description', readable=False, writable=False, label=T('Component description')),
Field('ajax', 'boolean', readable=False, writable=False, default=False, label=T('Component with Ajax')),
Field('ajax_trap', 'boolean', readable=False, writable=False, default=False, label=T('Component with Ajax trap')),
Field('container_class', readable=False, writable=False, label=T('Class of the container'), comment=T('For example "hidden-phone"')),
Field('parent', 'reference page_component', label=T('Parent')),
Field('rank', 'integer', readable=True, writable=True, default=0, label=T('Rank')),
)
db.page_component.parent.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(description)s', zero=T('<Empty>')))
db.define_table('page',
Field('parent', 'reference page', label=T('Parent')),
Field('title', unique=True, notnull=True, label=T('Title')),
Field('rank', 'integer', readable=True, writable=True, default=0, label=T('Rank')),
Field('subtitle', label=T('Subtitle')),
Field('url', unique=True, readable=True, writable=True, label=T('Url')),
Field('content', 'text', label=T('Content')),
Field('is_index', 'boolean', readable=True, writable=True, default=False, label=T('Is index')),
Field('is_enabled', 'boolean', readable=True, writable=True, default=True, label=T('Is enabled')),
Field('left_sidebar_enabled', 'boolean', default=False, label=T('Left sidebar')),
Field('right_sidebar_enabled', 'boolean', default=False, label=T('Right sidebar')),
Field('header_component', 'reference page_component', label=T('Header component')),
Field('left_sidebar_component', 'reference page_component', label=T('Left sidebar component')),
Field('right_sidebar_component', 'reference page_component', label=T('Right sidebar component')),
Field('left_footer_component', 'reference page_component', label=T('Left footer component')),
Field('middle_footer_component', 'reference page_component', label=T('Middle footer component')),
Field('right_footer_component', 'reference page_component', label=T('Right footer component')),
Field('central_component', 'reference page_component', label=T('Central component')),
Field('allow_disqus', 'boolean', label=T('Allow disqus (must be configured in website_parameters)')),
Field('max_content_height', 'integer', readable=True, writable=True, default=0, label=T('Max height (in pixels) of the page content (0 = no max height)')),
format='%(title)s'
)
db.page.parent.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page.id, '%(title)s', zero=T('<Empty>')))
db.page.header_component.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(name)s - %(description)s', zero=T('<Empty>')))
db.page.left_sidebar_component.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(name)s - %(description)s', zero=T('<Empty>')))
db.page.right_sidebar_component.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(name)s - %(description)s', zero=T('<Empty>')))
db.page.left_footer_component.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(name)s - %(description)s', zero=T('<Empty>')))
db.page.right_footer_component.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(name)s - %(description)s', zero=T('<Empty>')))
db.page.middle_footer_component.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(name)s - %(description)s', zero=T('<Empty>')))
db.page.central_component.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page_component.id, '%(name)s - %(description)s', zero=T('<Empty>')))
db.page.url.compute = lambda row: IS_SLUG()(row.title)[0]
pageSelector = HierarchicalSelect(db, db.page, db.page.title, db.page.rank)
db.page.parent.widget = pageSelector.widget
db.define_table('image',
Field('page', 'reference page', label=T('Page')),
Field('name', notnull=True, label=T('Name')),
Field('alt', label=T('Alt')),
Field('comment', label=T('Comment')),
Field('file', 'upload', uploadfolder=path.join(
request.folder,'static','images','photo_gallery'
), autodelete=True, label=T('File')),
Field('thumb', 'text', readable=False, writable=False, label=T('Thumb')),
Field('show_in_gallery', 'boolean', readable=True, writable=True, default=True, label=T('Show in gallery')),
Field('show_in_banner', 'boolean', readable=True, writable=True, default=False, label=T('Show in banner')),
format='%(name)s'
)
db.image.page.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page.id, '%(title)s', zero=T('<Empty>')))
db.image.alt.compute = lambda row: row.name.capitalize()
#db.image.page.widget = pageSelector.widget
db.define_table('registered_user',
Field('first_name', label=T('First name')),
Field('last_name', label=T('Last name')),
Field('email', unique=True, requires=[IS_EMAIL(), IS_NOT_IN_DB(db, 'registered_user.email')], label=T('Email')),
Field('subscribe_to_newsletter', 'boolean', default=True, label=T("User want to receive newsletter emails")),
format='%(email)s'
)
db.define_table('news',
Field('title', label=T('Title')),
Field('date','date',default=request.now,label=T('Date')),
Field('text','text',label=T('News content')),
Field('published_on', 'datetime', default=request.now),
Field('send_mail', 'boolean', readable=True, writable=True, default=False, label=T('Send an email to the registered users to inform them')),
Field('mail_sent', 'boolean', readable=False, writable=False, default=False, label=T('An email has been send to the registered users')),
Field('max_content_height', 'integer', readable=True, writable=True, default=0, label=T('Max height (in pixels) of the news content (0 = no max height)')),
format='%(text)s'
)
db.define_table('file',
Field('page', 'reference page', label=T('Page')),
Field('title', label=T('Title'), notnull=True),
Field('comment', label=T('Comment')),
Field('file', 'upload', uploadfolder=path.join(
request.folder,'static','uploaded_files'
), notnull=True, autodelete=True, label=T('File')),
Field('protected', 'boolean', readable=True, writable=True, default=False, label=T('Protected (visible only for authorized users)')),
Field('size', 'double', readable=False, writable=False, label=T('Size')),
format='%(title)s'
)
db.file.page.requires = IS_EMPTY_OR(IS_IN_DB(db, db.page.id, '%(title)s', zero=T('<Empty>')))
db.file.size.compute = lambda row: path.getsize(path.join(request.folder,'static','uploaded_files',row.file))
#db.file.page.widget = pageSelector.widget
## after defining tables, uncomment below to enable auditing
auth.enable_record_versioning(db) | 0.417628 | 0.094929 |
import sqlite3
import dataclasses
from typing import Generator, List
from src.helpers import helpers # type: ignore
from src.logger import config_logger #type: ignore
@dataclasses.dataclass
class BackupReader(config_logger.Logger):
"""Read data from Outlook 2016 Backup folder.
Attributes:
backup_location: A string of backup folder location.
profile_data_location: A string of data folder location based on user.
"""
profile_data_location: str
filter_hidden = '/.'
@property
def db_location(self):
sqlitedb = 'Outlook.sqlite'
try:
return helpers.Helper.test_location(
self.profile_data_location + sqlitedb, 'file')
except FileNotFoundError:
self.logger.warning(f'Missing {sqlitedb}, is the database missing?')
raise FileNotFoundError
def get_mails_from_database(self) -> Generator:
self.logger.info('Getting emails from database')
db_connection = self._connect_to_database(self.db_location)
mails = self._get_mails(db_connection)
for mail in mails:
yield self._get_info_from_email(dict(mail))
def get_mails_amount(self) -> int:
db_connection = self._connect_to_database(self.db_location)
mails = self._get_mails(db_connection)
return len(list(mails))
def _get_info_from_email(self, mail: dict) -> dict:
return {
'content_path' : mail.get('PathToDataFile'),
'subject' : mail.get('Message_ThreadTopic'),
'time' : mail.get('Message_TimeReceived'),
'sender' : {'email': mail.get('Message_SenderAddressList'),
'name': mail.get('Message_SenderList')},
'recipients' : self._merge_recipients(
mail.get('Message_ToRecipientAddressList'),
mail.get('Message_RecipientList')),
'cc' : mail.get('Message_CCRecipientAddressList'),
'type' : mail.get('Message_type'),
'id': mail.get('Threads_ThreadID'),
}
def _merge_recipients(self, emails: str, names: str) -> List:
try:
emails_split = emails.split(';')
names_split = names.split(';')
except AttributeError:
return [None]
recipients = []
for email, name in zip(emails_split, names_split):
recipients.append({
'email': email,
'name': name
})
return recipients
def _connect_to_database(self, db_location):
db_connection = sqlite3.connect(db_location)
db_connection.row_factory = sqlite3.Row
return db_connection.cursor()
def _get_mails(self, db_connection):
return db_connection.execute('SELECT * FROM Mail')
def _get_tables(self, db_connection):
return db_connection.execute("SELECT * FROM sqlite_schema WHERE type IN ('table','view') AND name NOT LIKE 'sqlite_%' ") | src/backupreader/reader.py | import sqlite3
import dataclasses
from typing import Generator, List
from src.helpers import helpers # type: ignore
from src.logger import config_logger #type: ignore
@dataclasses.dataclass
class BackupReader(config_logger.Logger):
    """Read data from an Outlook 2016 backup folder.

    Attributes:
        profile_data_location: Path of the per-user data folder that holds
            the ``Outlook.sqlite`` database.
    """
    profile_data_location: str
    # Prefix marking hidden entries; class-level constant, not a dataclass field.
    filter_hidden = '/.'

    @property
    def db_location(self):
        """Validated path of the Outlook sqlite database.

        Raises:
            FileNotFoundError: when the database file does not exist.
        """
        sqlitedb = 'Outlook.sqlite'
        try:
            return helpers.Helper.test_location(
                self.profile_data_location + sqlitedb, 'file')
        except FileNotFoundError:
            self.logger.warning(f'Missing {sqlitedb}, is the database missing?')
            # Re-raise the caught exception instead of a fresh, message-less
            # FileNotFoundError (preserves message and traceback).
            raise

    def get_mails_from_database(self) -> Generator:
        """Yield one parsed info dict per row of the Mail table."""
        self.logger.info('Getting emails from database')
        cursor = self._connect_to_database(self.db_location)
        for row in self._get_mails(cursor):
            yield self._get_info_from_email(dict(row))

    def get_mails_amount(self) -> int:
        """Return the number of rows in the Mail table."""
        cursor = self._connect_to_database(self.db_location)
        # Count lazily instead of materialising every row in a list.
        return sum(1 for _ in self._get_mails(cursor))

    def _get_info_from_email(self, mail: dict) -> dict:
        """Project a raw Mail row onto the fields used by the application."""
        return {
            'content_path': mail.get('PathToDataFile'),
            'subject': mail.get('Message_ThreadTopic'),
            'time': mail.get('Message_TimeReceived'),
            'sender': {'email': mail.get('Message_SenderAddressList'),
                       'name': mail.get('Message_SenderList')},
            'recipients': self._merge_recipients(
                mail.get('Message_ToRecipientAddressList'),
                mail.get('Message_RecipientList')),
            'cc': mail.get('Message_CCRecipientAddressList'),
            'type': mail.get('Message_type'),
            'id': mail.get('Threads_ThreadID'),
        }

    def _merge_recipients(self, emails: str, names: str) -> List:
        """Zip ;-separated address/name lists into per-recipient dicts.

        Returns ``[None]`` when either column is NULL (not a string).
        """
        try:
            pairs = zip(emails.split(';'), names.split(';'))
        except AttributeError:
            return [None]
        return [{'email': email, 'name': name} for email, name in pairs]

    def _connect_to_database(self, db_location):
        """Open *db_location* and return a cursor yielding sqlite3.Row rows."""
        connection = sqlite3.connect(db_location)
        connection.row_factory = sqlite3.Row
        return connection.cursor()

    def _get_mails(self, db_connection):
        """Return a cursor over every row of the Mail table."""
        return db_connection.execute('SELECT * FROM Mail')

    def _get_tables(self, db_connection):
        """List user tables/views (``sqlite_schema`` requires SQLite >= 3.33)."""
        # Fixed: dataset-export residue fused onto this line was a SyntaxError.
        return db_connection.execute(
            "SELECT * FROM sqlite_schema "
            "WHERE type IN ('table','view') AND name NOT LIKE 'sqlite_%' ")
from Bio import SeqIO
import os
import glob
import argparse
def main():
    """CLI entry point: parse -i/-o options and produce the UCE summary."""
    parser = argparse.ArgumentParser(
        description='Merges Phyluce UCEs from SPAdes and rnaSPAdes')
    parser.add_argument('-o', type=str, required=True, help='Output Folder')
    parser.add_argument('-i', type=str, required=True,
                        help='Input folder of merged fastas')
    args = parser.parse_args()
    print("Counts merged_uces into a summary file in {} directory".format(args.o))
    count_uces(args.o, args.i)
def count_uces(output_directory, input_directory):
    """Count merged UCE contigs per specimen and write a CSV summary.

    Scans every ``*_merged.fasta`` in *input_directory*, classifies each
    contig by the assembler suffix embedded in its id (_AU abyss-unmerged,
    _A abyss, _R rnaSPAdes, _S SPAdes) and writes
    ``merged_uce_summary.csv`` to *output_directory*.
    """
    # Gather each specimen file produced from the Phyluce
    merged_fastas = glob.glob(os.path.join(input_directory, "*_merged.fasta"))
    specimen_dict = {}
    for fasta in merged_fastas:
        specimen = os.path.basename(fasta)
        specimen_name = specimen.replace("_merged.fasta", "").replace("-", "_")
        # Fixed: dropped a useless `with open(fasta) as f:` wrapper -- the
        # handle was never used (SeqIO.parse reopens the file by path).
        count = 0
        abyss_count = 0
        spades_count = 0
        rnaspades_count = 0
        abyss_u_count = 0
        for seq in SeqIO.parse(fasta, 'fasta'):
            # Assembler provenance is encoded as an id suffix.
            if "_AU" in seq.id[-3:]:
                abyss_u_count += 1
            elif "_A" in seq.id[-2:]:
                abyss_count += 1
            elif "_R" in seq.id[-2:]:
                rnaspades_count += 1
            elif "_S" in seq.id[-2:]:
                spades_count += 1
            count += 1
        # Fixed: the original if/else had two identical branches; a plain
        # assignment (last file wins for duplicate specimen names) suffices.
        specimen_dict[specimen_name] = [count, abyss_count, abyss_u_count,
                                        spades_count, rnaspades_count]
    output_file = os.path.join(output_directory, "merged_uce_summary.csv")
    with open(output_file, "w") as g:
        g.write("Specimen, Merged Targets, Abyss Contribution, Abyss Unmerged Contribution, SPAdes Contribution, rnaSPAdes Contribution\n")
        for key, value in specimen_dict.items():
            g.write("{},{},{},{},{},{}\n".format(key, value[0], value[1], value[2], value[3], value[4]))
# Fixed: dataset-export residue fused onto the final line was a SyntaxError.
if __name__ == "__main__":
    main()
import os
import glob
import argparse
def main():
    """CLI entry point: parse -i/-o options and produce the UCE summary."""
    parser = argparse.ArgumentParser(
        description='Merges Phyluce UCEs from SPAdes and rnaSPAdes')
    parser.add_argument('-o', type=str, required=True, help='Output Folder')
    parser.add_argument('-i', type=str, required=True,
                        help='Input folder of merged fastas')
    args = parser.parse_args()
    print("Counts merged_uces into a summary file in {} directory".format(args.o))
    count_uces(args.o, args.i)
def count_uces(output_directory, input_directory):
    """Count merged UCE contigs per specimen and write a CSV summary.

    Scans every ``*_merged.fasta`` in *input_directory*, classifies each
    contig by the assembler suffix embedded in its id (_AU abyss-unmerged,
    _A abyss, _R rnaSPAdes, _S SPAdes) and writes
    ``merged_uce_summary.csv`` to *output_directory*.
    """
    # Gather each specimen file produced from the Phyluce
    merged_fastas = glob.glob(os.path.join(input_directory, "*_merged.fasta"))
    specimen_dict = {}
    for fasta in merged_fastas:
        specimen = os.path.basename(fasta)
        specimen_name = specimen.replace("_merged.fasta", "").replace("-", "_")
        # Fixed: dropped a useless `with open(fasta) as f:` wrapper -- the
        # handle was never used (SeqIO.parse reopens the file by path).
        count = 0
        abyss_count = 0
        spades_count = 0
        rnaspades_count = 0
        abyss_u_count = 0
        for seq in SeqIO.parse(fasta, 'fasta'):
            # Assembler provenance is encoded as an id suffix.
            if "_AU" in seq.id[-3:]:
                abyss_u_count += 1
            elif "_A" in seq.id[-2:]:
                abyss_count += 1
            elif "_R" in seq.id[-2:]:
                rnaspades_count += 1
            elif "_S" in seq.id[-2:]:
                spades_count += 1
            count += 1
        # Fixed: the original if/else had two identical branches; a plain
        # assignment (last file wins for duplicate specimen names) suffices.
        specimen_dict[specimen_name] = [count, abyss_count, abyss_u_count,
                                        spades_count, rnaspades_count]
    output_file = os.path.join(output_directory, "merged_uce_summary.csv")
    with open(output_file, "w") as g:
        g.write("Specimen, Merged Targets, Abyss Contribution, Abyss Unmerged Contribution, SPAdes Contribution, rnaSPAdes Contribution\n")
        for key, value in specimen_dict.items():
            g.write("{},{},{},{},{},{}\n".format(key, value[0], value[1], value[2], value[3], value[4]))
# Fixed: dataset-export residue fused onto the final line was a SyntaxError.
if __name__ == "__main__":
    main()
import time
import sys
# Product catalogue: menu key -> (name, price in TL).
urunler = {
    "1":("su", 1),
    "2":("çay", 2),
    "3":("kahve", 3),
    "4":("enerji içeceği", 5),
    "5":("paket portakal suyu", 7),
    "6":("taze portakal suyu", 11),
}
def input(mesaj):
    """Prompt with *mesaj* and read one line from stdin, character by character.

    NOTE: deliberately shadows the builtin ``input`` for the rest of this
    module (unbuffered prompt via explicit flush).
    """
    sys.stdout.write(mesaj)
    sys.stdout.flush()
    parcalar = []
    while True:
        karakter = sys.stdin.read(1)
        if karakter == "\n":
            return "".join(parcalar)
        parcalar.append(karakter)
def sonsuz_girdi(mesaj, beklenen):
    """Keep prompting until the answer is one of *beklenen*; return it."""
    while True:
        cevap = input(mesaj)
        if cevap not in beklenen:
            print("Bir hata var burada")
            continue
        return cevap
def sayisal_sonsuz_girdi(mesaj):
    """Keep prompting until a purely numeric answer arrives; return it as int."""
    while True:
        cevap = input(mesaj)
        if cevap.isdigit():
            return int(cevap)
        print("Bir hata var burada")
def karsilama():
    """Print the greeting banner together with the product list."""
    satirlar = karsilama_str()
    print("\n".join(satirlar))
def karsilama_str():
    """Build the greeting lines: header plus one 'key | (name, price)' row."""
    satirlar = ["Hoşgeldiniz", "Ürünler:"]
    satirlar.extend(" ".join((anahtar, "|", str(deger)))
                    for anahtar, deger in urunler.items())
    return satirlar
def veda():
    """Print a farewell, pause briefly, then scroll the screen clear."""
    print("Tekrar bekleriz")
    time.sleep(2)
    # 23 blank lines push the previous transaction off a typical terminal.
    print(23*"\n")
if __name__ == "__main__":
    import signal
    import argparse

    kasa = 0  # running till total, inspectable via SIGUSR1

    def _usr1_handler(signum, context):
        # Out-of-band till inspection: `kill -USR1 <pid>` prints the total.
        # Fixed: the parameter used to be named `signal`, shadowing the module.
        print("Kasa para:", kasa, file=sys.stderr, flush=True)

    signal.signal(signal.SIGUSR1, _usr1_handler)

    parser = argparse.ArgumentParser(description=' ## '.join(karsilama_str()+
                                     ["", "Hata kodu 1 = yetersiz bakiye"]))
    parser.add_argument("-p", '--para', type=int, help='girilen para')
    parser.add_argument("-u", '--urun', type=int, help='istenen urun')

    if len(sys.argv) > 1:
        # Non-interactive, single-purchase mode driven by CLI flags.
        argumanlar = parser.parse_args()
        urun, fiyat = urunler[str(argumanlar.urun)]
        para_girdi = argumanlar.para
        if fiyat > para_girdi:
            exit(1)  # insufficient funds -> documented exit code 1
        print("Ürününüzü Alınız", urun)
        print("Para Üstü", para_girdi - fiyat)
        exit()
    else:
        # Interactive vending loop.
        while True:
            karsilama()
            para_girdi = sayisal_sonsuz_girdi("Lütfen Para giriniz: ")
            urun_girdi = sonsuz_girdi("Ürün Seçiniz: ", urunler)
            urun, fiyat = urunler[urun_girdi]
            if fiyat > para_girdi:
                # Fixed typo in user message: "yetesiz" -> "yetersiz".
                print("Paranız yetersiz geldiğinden iade edilmiştir", para_girdi)
            else:
                kasa += fiyat
                print("Ürününüzü Alınız", urun)
                print("Para Üstü", para_girdi - fiyat)
            # Fixed: dataset-export residue fused onto this line (SyntaxError).
            veda()
import sys
# Product catalogue: menu key -> (name, price in TL).
urunler = {
    "1":("su", 1),
    "2":("çay", 2),
    "3":("kahve", 3),
    "4":("enerji içeceği", 5),
    "5":("paket portakal suyu", 7),
    "6":("taze portakal suyu", 11),
}
def input(mesaj):
    """Prompt with *mesaj* and read one line from stdin, character by character.

    NOTE: deliberately shadows the builtin ``input`` for the rest of this
    module (unbuffered prompt via explicit flush).
    """
    sys.stdout.write(mesaj)
    sys.stdout.flush()
    parcalar = []
    while True:
        karakter = sys.stdin.read(1)
        if karakter == "\n":
            return "".join(parcalar)
        parcalar.append(karakter)
def sonsuz_girdi(mesaj, beklenen):
    """Keep prompting until the answer is one of *beklenen*; return it."""
    while True:
        cevap = input(mesaj)
        if cevap not in beklenen:
            print("Bir hata var burada")
            continue
        return cevap
def sayisal_sonsuz_girdi(mesaj):
    """Keep prompting until a purely numeric answer arrives; return it as int."""
    while True:
        cevap = input(mesaj)
        if cevap.isdigit():
            return int(cevap)
        print("Bir hata var burada")
def karsilama():
    """Print the greeting banner together with the product list."""
    satirlar = karsilama_str()
    print("\n".join(satirlar))
def karsilama_str():
    """Build the greeting lines: header plus one 'key | (name, price)' row."""
    satirlar = ["Hoşgeldiniz", "Ürünler:"]
    satirlar.extend(" ".join((anahtar, "|", str(deger)))
                    for anahtar, deger in urunler.items())
    return satirlar
def veda():
    """Print a farewell, pause briefly, then scroll the screen clear."""
    print("Tekrar bekleriz")
    time.sleep(2)
    # 23 blank lines push the previous transaction off a typical terminal.
    print(23*"\n")
if __name__ == "__main__":
    import signal
    import argparse

    kasa = 0  # running till total, inspectable via SIGUSR1

    def _usr1_handler(signum, context):
        # Out-of-band till inspection: `kill -USR1 <pid>` prints the total.
        # Fixed: the parameter used to be named `signal`, shadowing the module.
        print("Kasa para:", kasa, file=sys.stderr, flush=True)

    signal.signal(signal.SIGUSR1, _usr1_handler)

    parser = argparse.ArgumentParser(description=' ## '.join(karsilama_str()+
                                     ["", "Hata kodu 1 = yetersiz bakiye"]))
    parser.add_argument("-p", '--para', type=int, help='girilen para')
    parser.add_argument("-u", '--urun', type=int, help='istenen urun')

    if len(sys.argv) > 1:
        # Non-interactive, single-purchase mode driven by CLI flags.
        argumanlar = parser.parse_args()
        urun, fiyat = urunler[str(argumanlar.urun)]
        para_girdi = argumanlar.para
        if fiyat > para_girdi:
            exit(1)  # insufficient funds -> documented exit code 1
        print("Ürününüzü Alınız", urun)
        print("Para Üstü", para_girdi - fiyat)
        exit()
    else:
        # Interactive vending loop.
        while True:
            karsilama()
            para_girdi = sayisal_sonsuz_girdi("Lütfen Para giriniz: ")
            urun_girdi = sonsuz_girdi("Ürün Seçiniz: ", urunler)
            urun, fiyat = urunler[urun_girdi]
            if fiyat > para_girdi:
                # Fixed typo in user message: "yetesiz" -> "yetersiz".
                print("Paranız yetersiz geldiğinden iade edilmiştir", para_girdi)
            else:
                kasa += fiyat
                print("Ürününüzü Alınız", urun)
                print("Para Üstü", para_girdi - fiyat)
            # Fixed: dataset-export residue fused onto this line (SyntaxError).
            veda()
import matplotlib.pyplot as plt
from matplotlib import style
from sklearn.decomposition import PCA
import multiprocessing
from MulticoreTSNE import MulticoreTSNE
import numpy as np
import pandas as pd
import seaborn as sns
from collections import defaultdict
from scipy.cluster import hierarchy
from mpl_toolkits.axes_grid1 import make_axes_locatable
import os
from .palettes import pallete_50
class Visualize():
    '''class for embedding visualization'''
    def __init__(self):
        # Data matrix (cells x genes); presumably populated by a fitting
        # step elsewhere in the package -- TODO confirm against the mixin.
        self.X = None
        # Whether 2D positions have been computed yet.
        self.position_flag = False
def fit_tsne(self,
perplexity=30,
learning_rate=1000,
early_exaggeration=12,
metric='correlation',
n_comp=False,
multicore=True,
fdr=None, sample_variance=None):
"""
Embedding of single cell data with t-distributed stochastic neighborhood
embedding (tSNE) for 2D visualization. By default, we use Multicore-tsne
implementation by <NAME>
https://github.com/DmitryUlyanov/Multicore-TSNE>,
if number of processors is great than 1. Otherwise, scikit-learn
implementation is used. Default parametes by scikit-learn are used.
Parameters
----------
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets usually
require a larger perplexity. Consider selecting a value between 5 and 50.
The choice is not extremely critical since t-SNE is quite insensitive to
this parameter.
early_exaggeration : 'float', optional (default: 12.0)
Controls how tight natural clusters in the original space are in the
embedded space and how much space will be between them. For larger
values, the space between natural clusters will be larger in the
embedded space. Again, the choice of this parameter is not very
critical. If the cost function increases during initial optimization,
the early exaggeration factor or the learning rate might be too high.
learning_rate : 'float', optional (default: 1000)
Note that the R-package "Rtsne" uses a default of 200.
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
Returns
-------
Updated insance self, with self.embedding containing 2D t-SNE coordianates
"""
if self.X is None:
raise ValueError('Nothing to plot, please fit the data first')
else:
self.selected_genes = self.select_genes(fdr, sample_variance)
self.X_vis = self.X.copy(
)[:, self.normal_genes.isin(self.selected_genes)]
if not self._selection_flag:
print(
'Criterium for selection of genes undefined, please select FDR < 1 OR sample variance > 0')
print(' ')
if n_comp:
pca = PCA(n_components=n_comp, svd_solver='full')
self.X_vis = pca.fit_transform(self.X_vis)
n_jobs = multiprocessing.cpu_count()
if n_jobs > 1 and multicore:
tsne = MulticoreTSNE(n_jobs=n_jobs,
init='random',
metric=metric,
perplexity=perplexity,
learning_rate=learning_rate,
early_exaggeration=early_exaggeration)
print(
'computing t-SNE, using Multicore t-SNE for {0} jobs'.format(n_jobs))
# need to transform to float64 for MulticoreTSNE...
self.embedding = tsne.fit_transform(self.X_vis.astype('float64'))
else:
print('computing t-SNE, using scikit-learn implementation')
tsne = manifold.TSNE(n_components=2,
init='pca',
random_state=0,
metric='correlation',
perplexity=perplexity)
self.embedding = tsne.fit_transform(self.X_vis)
print('atribute embedding is updated with t-SNE coordinates')
if n_comp:
self.X_vis = self.X.copy(
)[:, self.normal_genes.isin(self.selected_genes)]
return
def fit_pca(self, n_comp=2, fdr=None, sample_variance=None):
'''2D PCA of the Data based on first 2 principal components'''
if not self._selection_flag:
print(
'Criterium for selection of genes undefined, please select FDR < 1 OR sample variance > 0')
print(' ')
if self.X is None:
raise ValueError('Nothing to plot, please fit the data first')
else:
self.selected_genes = self.select_genes(fdr, sample_variance)
self.X_vis = self.X.copy(
)[:, self.normal_genes.isin(self.selected_genes)]
pca = PCA(n_components=n_comp, svd_solver='full')
self.embedding = pca.fit_transform(self.X_vis)
print('atribute embedding is updated with t-SNE coordinates')
return
    def plot(self,
             path=False,
             title=False,
             labels=False,
             palette=pallete_50,
             gene=False,
             data=False,
             size=4.5,
             fontsize=13.5,
             legend=False,
             legendcol=5,
             points=5,
             xytitle='t-SNE',
             tight=False
             ):
        """Scatter-plot the 2D ``self.embedding``.

        Mutually exclusive modes:
        * ``labels`` given: one colour per cluster label;
        * ``gene`` is a list: colour by mean expression of those genes
          (or by library complexity when ``gene == ['library']``);
        * ``gene`` is a tuple: one colour-coded panel per entry;
        * otherwise: plain black scatter.

        NOTE(review): the ``fontsize`` parameter is immediately overwritten
        from ``size`` below, so callers cannot actually control it.
        """
        fontsize = size*2.7
        if labels is not False:
            # --- labelled scatter, one colour per cluster -------------------
            if not palette:
                palette = sns.color_palette("husl", len(set(labels))+1)
            # 0 / 200001 are sentinel labels (see get_cluster_info); only
            # hierarchy-derived labels use the stored hierarchy palette.
            if 0 in labels or 200001 in labels:
                palette = palette
            else:
                palette = self.palette_hierarchy
            with sns.plotting_context("paper", font_scale=1.5):
                self.style()
                g = sns.lmplot(x='x',
                               y='y',
                               fit_reg=False,
                               scatter_kws={'s': points,
                                            'alpha': 1},
                               hue='label',
                               data=pd.DataFrame(self.embedding,
                                                 columns=['x', 'y'])
                               .join(pd.Series(labels, name='label')),
                               height=size,
                               palette=sns.set_palette(palette),
                               legend=False)
            g.set(yticks=[], xticks=[])
            if legend is not False:
                if title:
                    sep = 1.05
                else:
                    sep = 1.0
                if type(legend) is list:
                    plt.legend(legend,
                               loc='lower center',
                               bbox_to_anchor=(0.5, sep),
                               ncol=legendcol,
                               frameon=True,
                               markerscale=size//2,
                               fontsize=size+3)
                elif legend is True:
                    plt.legend(loc='lower center',
                               bbox_to_anchor=(0.5, sep),
                               ncol=legendcol,
                               frameon=True,
                               markerscale=np.floor(size/2.),
                               fontsize=size+3.0)
            plt.xlabel(xytitle + '1', fontsize=fontsize)
            plt.ylabel(xytitle + '2', fontsize=fontsize)
            plt.autoscale(enable=True, axis='both', tight=tight)
        elif type(gene) is list:
            # --- single panel coloured by expression / library size ---------
            if gene[0] == 'library':
                color = (self.X3.T > 0).sum()
            else:
                color = self.X3[gene].T.mean()
            self.style()
            fig = plt.figure(figsize=(size, size + 0.5), dpi=100)
            g = plt.scatter(self.embedding[:, 0],
                            self.embedding[:, 1],
                            s=points,
                            c=color,
                            alpha=1,
                            cmap='coolwarm'
                            )
            plt.xlabel(xytitle + '1', fontsize=fontsize)
            plt.ylabel(xytitle + '2', fontsize=fontsize)
            plt.gca().set_xticks([])
            plt.gca().set_yticks([])
            plt.autoscale(enable=True, axis='both', tight=tight)
            divider = make_axes_locatable(plt.gca())
            cax = divider.append_axes("right", "2.5%", pad="1%")
            if gene[0] == 'library':
                plt.colorbar(g, cax=cax, label='library complexity')
            else:
                plt.colorbar(g, cax=cax, label='log2(1+TPM)')
        elif type(gene) is tuple:
            # --- grid of panels, one per tuple entry ------------------------
            n = len(gene)
            nrow = int(np.sqrt(n))
            ncol = int(np.ceil(n / nrow))
            if (n % 2 != 0 and n > 3) or nrow * ncol < n:
                ncol = ncol+1
            if n < 4:
                fig, axs = plt.subplots(nrow, ncol, dpi=100,
                                        figsize=(ncol*size*1.5,
                                                 nrow*size*1.5)
                                        )
            else:
                fig, axs = plt.subplots(nrow, ncol, dpi=100,
                                        figsize=(ncol*size,
                                                 nrow*size)
                                        )
            # Remove the unused trailing axes of a non-full grid.
            if nrow*ncol > n:
                for i in range(ncol*nrow - n):
                    fig.delaxes(axs[-1][-(i+1)])
            if type(axs) != np.ndarray:
                axs = [axs]
            else:
                axs = axs.ravel()
            for i in range(n):
                if i < n:
                    if type(gene[i]) is list:
                        marker = gene[i]
                    else:
                        marker = [gene[i]]
                    if marker[0] == 'library':
                        color = (self.X3.T > 0).sum()
                    else:
                        color = self.X3[marker].T.mean()
                    self.style()
                    g = axs[i].scatter(self.embedding[:, 0],
                                       self.embedding[:, 1],
                                       s=points,
                                       c=color,
                                       alpha=1,
                                       cmap='coolwarm'
                                       )
                    axs[i].set_xticks([])
                    axs[i].set_yticks([])
                    axs[i].autoscale(enable=True, axis='both', tight=tight)
                    divider = make_axes_locatable(axs[i])
                    cax = divider.append_axes("right", "2.5%", pad="1%")
                    if marker[0] == 'library':
                        fig.colorbar(g, cax=cax,
                                     label='library complexity')
                    else:
                        fig.colorbar(g, cax=cax)
                    if title:
                        axs[i].set_title(title)
                    else:
                        if len(marker) < 2:
                            axs[i].set_title(
                                str(marker[0]), fontsize=fontsize-2)
                        elif len(marker) > 1:
                            axs[i].set_title(
                                'list starting with ' + str(marker[0]), fontsize=fontsize-2)
                    # Axis labels only on the left column / bottom row.
                    if i % ncol == 0:
                        axs[i].set_ylabel(xytitle+'2', fontsize=fontsize)
                    if ((i // ncol) + 1) == nrow:
                        axs[i].set_xlabel(xytitle+'1', fontsize=fontsize)
        else:
            # --- plain black scatter ----------------------------------------
            with sns.plotting_context("paper", font_scale=1.5):
                self.style()
                g = sns.lmplot(x='x',
                               y='y',
                               fit_reg=False,
                               scatter_kws={'s': points,
                                            'alpha': .9, 'color': 'black'},
                               hue=None,
                               data=pd.DataFrame(self.embedding,
                                                 columns=['x', 'y']),
                               height=size,
                               aspect=1,
                               legend=False,
                               )
            g.set(yticks=[], xticks=[])
            plt.xlabel(xytitle + '1', fontsize=fontsize)
            plt.ylabel(xytitle + '2', fontsize=fontsize)
            plt.autoscale(enable=True, axis='both', tight=tight)
        sns.despine(top=False, right=False, left=False, bottom=False)
        if title:
            plt.title(title)
        if path:
            plt.savefig(path, bbox_inches='tight')
        plt.show()
def _get_cluster_classes(self, den, label='ivl'):
cluster_idxs = defaultdict(list)
for c, pi in zip(den['color_list'], den['icoord']):
for leg in pi[1:3]:
i = (leg - 5.0) / 10.0
if abs(i - int(i)) < 1e-5:
cluster_idxs[c].append(int(i))
cluster_classes = {}
for c, l in cluster_idxs.items():
i_l = [den[label][i] for i in l]
cluster_classes[c] = i_l
return cluster_classes
    def visual_hierarchy(self,
                         thrs=None,
                         path=False,
                         palette=pallete_50,
                         cell_label=False,
                         value_range=5):
        """Clustered heatmap (cells x selected genes) with dendrogram-derived
        column colours; stores the resulting labels/palette on ``self``.

        Parameters
        ----------
        thrs : float, optional
            Colour threshold for the dendrogram; defaults to half the
            maximum linkage distance.
        path : str or False
            When set, the heatmap figure is saved to this path.
        palette : list
            Colours handed to scipy's dendrogram link colouring.
        cell_label : list or False
            Extra per-cell colour annotation row for the heatmap.
        value_range : float
            Symmetric colour-scale limit (vmin/vmax).
        """
        self.style_mp_stat()
        pop = pd.DataFrame(self.X_vis, index=self.normal_cells,
                           columns=self.selected_genes)
        pop = pop.T
        palette_clust = palette
        # h1/h2 are precomputed linkages -- presumably rows resp. columns;
        # TODO confirm against the code that sets them.
        Y = self.h2
        X = self.h1
        if thrs is None:
            thrs = 0.5*max(Y[:, 2])
        else:
            thrs = thrs
        hierarchy.set_link_color_palette(palette_clust)
        # no_plot: we only want the colour assignment, not the figure.
        denC = hierarchy.dendrogram(
            Y, labels=pop.columns, color_threshold=thrs, no_plot=True)
        clustersC = self._get_cluster_classes(denC)
        # One link colour (or None) per cell, in column order.
        clusterC = []
        for i in pop.columns:
            included = False
            for j in clustersC.keys():
                if i in clustersC[j]:
                    clusterC.append(j)
                    included = True
            if not included:
                clusterC.append(None)
        if cell_label:
            gC = sns.clustermap(pop,
                                linewidths=0, xticklabels=False, yticklabels=False, cbar_kws={"orientation": "vertical"},
                                center=0, figsize=(8, 6), vmin=-value_range, vmax=value_range, col_colors=[clusterC, cell_label], col_linkage=Y, row_linkage=X, cmap='coolwarm')
            gC.cax.set_visible(True)
        else:
            gC = sns.clustermap(pop,
                                linewidths=0, xticklabels=False, yticklabels=False, vmin=-value_range, vmax=value_range,
                                cbar_kws={"orientation": "vertical"}, center=0, figsize=(7, 7), col_colors=clusterC, col_linkage=Y, row_linkage=X, cmap='coolwarm')
        if path:
            plt.savefig(path, bbox_inches='tight')
        # One indicator column per link colour: cell belongs to that cluster.
        tabC = pd.DataFrame()
        for i in set(denC['color_list']):
            color_cluster = pd.Series(clusterC).apply(
                lambda x: x == str(i)).astype(int).values.tolist()
            tabC[i] = color_cluster
        if len(tabC.index) == len(pop.columns):
            tabC.index = pop.columns
            tabC = tabC[list(set(tabC.columns.tolist()) & set(palette_clust))]
            plt.show()
            # Encode cluster membership as integers 1..k via column weights.
            for i in range(0, tabC.shape[1]):
                tabC.T.iloc[i] = tabC.T.iloc[i]*(i+1)
            # Cells assigned to no cluster are dropped from the embedding too.
            excase = tabC.loc[tabC.T.sum() < 1].index.tolist()
            if len(excase) > 0:
                for i in excase:
                    self.embedding = np.delete(
                        self.embedding, pd.Index(excase).get_loc(i), axis=0)
                tabC = tabC.copy()
                tabC.drop(excase, axis=0, inplace=True)
            self.labels_hierarchy = tabC.T.sum().tolist()
            self.palette_hierarchy = tabC.columns.tolist()
        else:
            print('threshold too small')
def get_cluster_info(self,
labels=False,
cluster=False,
genes=10,
plot_genes=True):
self.style()
cl = pd.DataFrame({'cluster': labels}, index=self.normal_cells)
if cluster is 'all':
return cl
elif type(cluster) is int:
cl_n = cl.loc[cl['cluster'] == cluster].index.tolist()
cl[cl['cluster'] != cluster] = 200000
cl[cl['cluster'] == cluster] = 200001
print('The cluster', cluster, 'has', len(cl_n), 'cells')
self.plot(labels=cl['cluster'].tolist(),
palette=['#d8dcd6', '#49759c'])
print('The top', genes,
'highly expressed signal-like genes in this cluster are:')
top = self.X3.loc[self.normal_cells, self.normal_genes].loc[cl_n, :].mean()\
.sort_values(ascending=False)[:genes].index.tolist()
for i in top:
print(i)
if plot_genes:
self.plot(gene=tuple(top))
else:
print('Select right cluster option')
def get_gene_info(self,
labels=False,
gene=False,
path=False,
legend=False,
size=6.5):
self.style()
if 0 in labels or 200001 in labels:
palette = pallete_50
else:
palette = self.palette_hierarchy
cl = pd.DataFrame({'cluster': labels}, index=self.normal_cells)
cl2 = [self.X3[gene].T.mean()[cl.loc[cl['cluster'] == i].index]
for i in pd.unique(labels)]
if type(legend) is list:
order = legend
index_lab = legend
else:
order = sorted(pd.unique(labels))
index_lab = pd.unique(labels)
cl2 = pd.DataFrame(cl2, index=index_lab)
fig = plt.figure(figsize=[size, 5])
sns.violinplot(data=cl2.T, orient='v', inner=None, order=order,
palette=palette,linewidth=0.5)
sns.stripplot(data=cl2.T, color="black", size=1.5, order=order)
if legend:
plt.xticks(rotation=45)
plt.xlabel('Cluster')
plt.ylabel('log2(1+TPM)')
if path:
plt.savefig(os.path.splitext(path)[
0]+'_violin'+os.path.splitext(path)[1], bbox_inches='tight')
# ------Fig2 ------------
cl['gene'] = self.X3[gene].T.mean()
palette2 = []
pal_list = cl.copy()
for i in sorted(pd.unique(cl['cluster'])):
if cl[cl['cluster'] == i]['gene'].var() < 0.0000000001:
cl.drop(cl[cl['cluster'] == i].index, axis=0, inplace=True)
else:
palette2.append(
palette[sorted(pd.unique(pal_list['cluster'])).index(i)])
if len(palette2) > 0:
palette = palette2
sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
g = sns.FacetGrid(cl, row="cluster", hue="cluster", aspect=8.5,
height=.7, palette=palette)
g.map(sns.kdeplot, "gene", clip_on=False,
shade=True, alpha=1, lw=1.5, bw=.13)
g.map(plt.axhline, y=0, lw=2, clip_on=False)
down, up = plt.ylim()
for i in g.row_names:
if type(legend) is list:
mi = legend[g.row_names.index(i)]
g.axes[g.row_names.index(i),0].set_ylabel(mi,y=down*1.2,
labelpad=20,
color=palette[g.row_names.index(i)],
fontsize='medium',rotation=0)
else:
mi = i
g.axes[g.row_names.index(i),0].set_ylabel('cluster '+str(mi),y=down*1.2,
labelpad=20,
color=palette[g.row_names.index(i)],
fontsize='medium',rotation=0)
if len(palette) < 11 and len(palette) > 5:
g.fig.subplots_adjust(hspace=-0.7, top=0.70, bottom=0.05)
elif len(palette) < 6:
g.fig.subplots_adjust(hspace=-0.6, top=0.85, bottom=0.05)
else:
g.fig.subplots_adjust(hspace=-0.7, top=0.50, bottom=0.05)
g.set_titles("")
g.set(yticks=[])
g.despine(bottom=True, left=True)
plt.xlabel('log2(1+TPM)')
if path:
plt.savefig(os.path.splitext(path)[0]+'_ridge'+os.path.splitext(path)[1],
bbox_inches='tight')
self.style()
plt.show() | randomly/visualization.py | import matplotlib.pyplot as plt
from matplotlib import style
from sklearn.decomposition import PCA
import multiprocessing
from MulticoreTSNE import MulticoreTSNE
import numpy as np
import pandas as pd
import seaborn as sns
from collections import defaultdict
from scipy.cluster import hierarchy
from mpl_toolkits.axes_grid1 import make_axes_locatable
import os
from .palettes import pallete_50
class Visualize():
    '''class for embedding visualization'''
    def __init__(self):
        # Data matrix (cells x genes); presumably populated by a fitting
        # step elsewhere in the package -- TODO confirm against the mixin.
        self.X = None
        # Whether 2D positions have been computed yet.
        self.position_flag = False
def fit_tsne(self,
perplexity=30,
learning_rate=1000,
early_exaggeration=12,
metric='correlation',
n_comp=False,
multicore=True,
fdr=None, sample_variance=None):
"""
Embedding of single cell data with t-distributed stochastic neighborhood
embedding (tSNE) for 2D visualization. By default, we use Multicore-tsne
implementation by <NAME>
https://github.com/DmitryUlyanov/Multicore-TSNE>,
if number of processors is great than 1. Otherwise, scikit-learn
implementation is used. Default parametes by scikit-learn are used.
Parameters
----------
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets usually
require a larger perplexity. Consider selecting a value between 5 and 50.
The choice is not extremely critical since t-SNE is quite insensitive to
this parameter.
early_exaggeration : 'float', optional (default: 12.0)
Controls how tight natural clusters in the original space are in the
embedded space and how much space will be between them. For larger
values, the space between natural clusters will be larger in the
embedded space. Again, the choice of this parameter is not very
critical. If the cost function increases during initial optimization,
the early exaggeration factor or the learning rate might be too high.
learning_rate : 'float', optional (default: 1000)
Note that the R-package "Rtsne" uses a default of 200.
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
Returns
-------
Updated insance self, with self.embedding containing 2D t-SNE coordianates
"""
if self.X is None:
raise ValueError('Nothing to plot, please fit the data first')
else:
self.selected_genes = self.select_genes(fdr, sample_variance)
self.X_vis = self.X.copy(
)[:, self.normal_genes.isin(self.selected_genes)]
if not self._selection_flag:
print(
'Criterium for selection of genes undefined, please select FDR < 1 OR sample variance > 0')
print(' ')
if n_comp:
pca = PCA(n_components=n_comp, svd_solver='full')
self.X_vis = pca.fit_transform(self.X_vis)
n_jobs = multiprocessing.cpu_count()
if n_jobs > 1 and multicore:
tsne = MulticoreTSNE(n_jobs=n_jobs,
init='random',
metric=metric,
perplexity=perplexity,
learning_rate=learning_rate,
early_exaggeration=early_exaggeration)
print(
'computing t-SNE, using Multicore t-SNE for {0} jobs'.format(n_jobs))
# need to transform to float64 for MulticoreTSNE...
self.embedding = tsne.fit_transform(self.X_vis.astype('float64'))
else:
print('computing t-SNE, using scikit-learn implementation')
tsne = manifold.TSNE(n_components=2,
init='pca',
random_state=0,
metric='correlation',
perplexity=perplexity)
self.embedding = tsne.fit_transform(self.X_vis)
print('atribute embedding is updated with t-SNE coordinates')
if n_comp:
self.X_vis = self.X.copy(
)[:, self.normal_genes.isin(self.selected_genes)]
return
def fit_pca(self, n_comp=2, fdr=None, sample_variance=None):
'''2D PCA of the Data based on first 2 principal components'''
if not self._selection_flag:
print(
'Criterium for selection of genes undefined, please select FDR < 1 OR sample variance > 0')
print(' ')
if self.X is None:
raise ValueError('Nothing to plot, please fit the data first')
else:
self.selected_genes = self.select_genes(fdr, sample_variance)
self.X_vis = self.X.copy(
)[:, self.normal_genes.isin(self.selected_genes)]
pca = PCA(n_components=n_comp, svd_solver='full')
self.embedding = pca.fit_transform(self.X_vis)
print('atribute embedding is updated with t-SNE coordinates')
return
    def plot(self,
             path=False,
             title=False,
             labels=False,
             palette=pallete_50,
             gene=False,
             data=False,
             size=4.5,
             fontsize=13.5,
             legend=False,
             legendcol=5,
             points=5,
             xytitle='t-SNE',
             tight=False
             ):
        """Scatter-plot the 2D ``self.embedding``.

        Mutually exclusive modes:
        * ``labels`` given: one colour per cluster label;
        * ``gene`` is a list: colour by mean expression of those genes
          (or by library complexity when ``gene == ['library']``);
        * ``gene`` is a tuple: one colour-coded panel per entry;
        * otherwise: plain black scatter.

        NOTE(review): the ``fontsize`` parameter is immediately overwritten
        from ``size`` below, so callers cannot actually control it.
        """
        fontsize = size*2.7
        if labels is not False:
            # --- labelled scatter, one colour per cluster -------------------
            if not palette:
                palette = sns.color_palette("husl", len(set(labels))+1)
            # 0 / 200001 are sentinel labels (see get_cluster_info); only
            # hierarchy-derived labels use the stored hierarchy palette.
            if 0 in labels or 200001 in labels:
                palette = palette
            else:
                palette = self.palette_hierarchy
            with sns.plotting_context("paper", font_scale=1.5):
                self.style()
                g = sns.lmplot(x='x',
                               y='y',
                               fit_reg=False,
                               scatter_kws={'s': points,
                                            'alpha': 1},
                               hue='label',
                               data=pd.DataFrame(self.embedding,
                                                 columns=['x', 'y'])
                               .join(pd.Series(labels, name='label')),
                               height=size,
                               palette=sns.set_palette(palette),
                               legend=False)
            g.set(yticks=[], xticks=[])
            if legend is not False:
                if title:
                    sep = 1.05
                else:
                    sep = 1.0
                if type(legend) is list:
                    plt.legend(legend,
                               loc='lower center',
                               bbox_to_anchor=(0.5, sep),
                               ncol=legendcol,
                               frameon=True,
                               markerscale=size//2,
                               fontsize=size+3)
                elif legend is True:
                    plt.legend(loc='lower center',
                               bbox_to_anchor=(0.5, sep),
                               ncol=legendcol,
                               frameon=True,
                               markerscale=np.floor(size/2.),
                               fontsize=size+3.0)
            plt.xlabel(xytitle + '1', fontsize=fontsize)
            plt.ylabel(xytitle + '2', fontsize=fontsize)
            plt.autoscale(enable=True, axis='both', tight=tight)
        elif type(gene) is list:
            # --- single panel coloured by expression / library size ---------
            if gene[0] == 'library':
                color = (self.X3.T > 0).sum()
            else:
                color = self.X3[gene].T.mean()
            self.style()
            fig = plt.figure(figsize=(size, size + 0.5), dpi=100)
            g = plt.scatter(self.embedding[:, 0],
                            self.embedding[:, 1],
                            s=points,
                            c=color,
                            alpha=1,
                            cmap='coolwarm'
                            )
            plt.xlabel(xytitle + '1', fontsize=fontsize)
            plt.ylabel(xytitle + '2', fontsize=fontsize)
            plt.gca().set_xticks([])
            plt.gca().set_yticks([])
            plt.autoscale(enable=True, axis='both', tight=tight)
            divider = make_axes_locatable(plt.gca())
            cax = divider.append_axes("right", "2.5%", pad="1%")
            if gene[0] == 'library':
                plt.colorbar(g, cax=cax, label='library complexity')
            else:
                plt.colorbar(g, cax=cax, label='log2(1+TPM)')
        elif type(gene) is tuple:
            # --- grid of panels, one per tuple entry ------------------------
            n = len(gene)
            nrow = int(np.sqrt(n))
            ncol = int(np.ceil(n / nrow))
            if (n % 2 != 0 and n > 3) or nrow * ncol < n:
                ncol = ncol+1
            if n < 4:
                fig, axs = plt.subplots(nrow, ncol, dpi=100,
                                        figsize=(ncol*size*1.5,
                                                 nrow*size*1.5)
                                        )
            else:
                fig, axs = plt.subplots(nrow, ncol, dpi=100,
                                        figsize=(ncol*size,
                                                 nrow*size)
                                        )
            # Remove the unused trailing axes of a non-full grid.
            if nrow*ncol > n:
                for i in range(ncol*nrow - n):
                    fig.delaxes(axs[-1][-(i+1)])
            if type(axs) != np.ndarray:
                axs = [axs]
            else:
                axs = axs.ravel()
            for i in range(n):
                if i < n:
                    if type(gene[i]) is list:
                        marker = gene[i]
                    else:
                        marker = [gene[i]]
                    if marker[0] == 'library':
                        color = (self.X3.T > 0).sum()
                    else:
                        color = self.X3[marker].T.mean()
                    self.style()
                    g = axs[i].scatter(self.embedding[:, 0],
                                       self.embedding[:, 1],
                                       s=points,
                                       c=color,
                                       alpha=1,
                                       cmap='coolwarm'
                                       )
                    axs[i].set_xticks([])
                    axs[i].set_yticks([])
                    axs[i].autoscale(enable=True, axis='both', tight=tight)
                    divider = make_axes_locatable(axs[i])
                    cax = divider.append_axes("right", "2.5%", pad="1%")
                    if marker[0] == 'library':
                        fig.colorbar(g, cax=cax,
                                     label='library complexity')
                    else:
                        fig.colorbar(g, cax=cax)
                    if title:
                        axs[i].set_title(title)
                    else:
                        if len(marker) < 2:
                            axs[i].set_title(
                                str(marker[0]), fontsize=fontsize-2)
                        elif len(marker) > 1:
                            axs[i].set_title(
                                'list starting with ' + str(marker[0]), fontsize=fontsize-2)
                    # Axis labels only on the left column / bottom row.
                    if i % ncol == 0:
                        axs[i].set_ylabel(xytitle+'2', fontsize=fontsize)
                    if ((i // ncol) + 1) == nrow:
                        axs[i].set_xlabel(xytitle+'1', fontsize=fontsize)
        else:
            # --- plain black scatter ----------------------------------------
            with sns.plotting_context("paper", font_scale=1.5):
                self.style()
                g = sns.lmplot(x='x',
                               y='y',
                               fit_reg=False,
                               scatter_kws={'s': points,
                                            'alpha': .9, 'color': 'black'},
                               hue=None,
                               data=pd.DataFrame(self.embedding,
                                                 columns=['x', 'y']),
                               height=size,
                               aspect=1,
                               legend=False,
                               )
            g.set(yticks=[], xticks=[])
            plt.xlabel(xytitle + '1', fontsize=fontsize)
            plt.ylabel(xytitle + '2', fontsize=fontsize)
            plt.autoscale(enable=True, axis='both', tight=tight)
        sns.despine(top=False, right=False, left=False, bottom=False)
        if title:
            plt.title(title)
        if path:
            plt.savefig(path, bbox_inches='tight')
        plt.show()
def _get_cluster_classes(self, den, label='ivl'):
cluster_idxs = defaultdict(list)
for c, pi in zip(den['color_list'], den['icoord']):
for leg in pi[1:3]:
i = (leg - 5.0) / 10.0
if abs(i - int(i)) < 1e-5:
cluster_idxs[c].append(int(i))
cluster_classes = {}
for c, l in cluster_idxs.items():
i_l = [den[label][i] for i in l]
cluster_classes[c] = i_l
return cluster_classes
    def visual_hierarchy(self,
                         thrs=None,
                         path=False,
                         palette=pallete_50,
                         cell_label=False,
                         value_range=5):
        """Plot a clustered heatmap of the visualization matrix and derive
        hierarchical cluster labels from the column dendrogram.

        thrs -- color threshold for the dendrogram; defaults to half the
                maximum linkage distance
        path -- optional file path for saving the figure
        palette -- link color palette for the dendrogram clusters
        cell_label -- optional extra column-color annotation
        value_range -- symmetric vmin/vmax for the heatmap color scale

        Side effects: sets self.labels_hierarchy and self.palette_hierarchy,
        and may delete rows from self.embedding for cells assigned to no
        cluster. NOTE(review): assumes self.h1/self.h2 are precomputed
        row/column linkage matrices — confirm against the fitting step.
        """
        self.style_mp_stat()
        pop = pd.DataFrame(self.X_vis, index=self.normal_cells,
                           columns=self.selected_genes)
        pop = pop.T
        palette_clust = palette
        Y = self.h2
        X = self.h1
        if thrs is None:
            thrs = 0.5*max(Y[:, 2])
        else:
            thrs = thrs
        hierarchy.set_link_color_palette(palette_clust)
        # no_plot: we only need the color assignments, not the drawing
        denC = hierarchy.dendrogram(
            Y, labels=pop.columns, color_threshold=thrs, no_plot=True)
        clustersC = self._get_cluster_classes(denC)
        # per-column cluster color; None when a cell belongs to no cluster
        clusterC = []
        for i in pop.columns:
            included = False
            for j in clustersC.keys():
                if i in clustersC[j]:
                    clusterC.append(j)
                    included = True
            if not included:
                clusterC.append(None)
        if cell_label:
            gC = sns.clustermap(pop,
                                linewidths=0, xticklabels=False, yticklabels=False, cbar_kws={"orientation": "vertical"},
                                center=0, figsize=(8, 6), vmin=-value_range, vmax=value_range, col_colors=[clusterC, cell_label], col_linkage=Y, row_linkage=X, cmap='coolwarm')
            gC.cax.set_visible(True)
        else:
            gC = sns.clustermap(pop,
                                linewidths=0, xticklabels=False, yticklabels=False, vmin=-value_range, vmax=value_range,
                                cbar_kws={"orientation": "vertical"}, center=0, figsize=(7, 7), col_colors=clusterC, col_linkage=Y, row_linkage=X, cmap='coolwarm')
        if path:
            plt.savefig(path, bbox_inches='tight')
        # one indicator column per dendrogram color: 1 if the cell is in
        # that color's cluster, else 0
        tabC = pd.DataFrame()
        for i in set(denC['color_list']):
            color_cluster = pd.Series(clusterC).apply(
                lambda x: x == str(i)).astype(int).values.tolist()
            tabC[i] = color_cluster
        if len(tabC.index) == len(pop.columns):
            tabC.index = pop.columns
            # keep only colors that belong to the supplied palette
            tabC = tabC[list(set(tabC.columns.tolist()) & set(palette_clust))]
            plt.show()
            # scale each indicator column so the row sum encodes a unique
            # integer cluster id (1..k)
            for i in range(0, tabC.shape[1]):
                tabC.T.iloc[i] = tabC.T.iloc[i]*(i+1)
            # cells assigned to no retained cluster are dropped everywhere
            excase = tabC.loc[tabC.T.sum() < 1].index.tolist()
            if len(excase) > 0:
                for i in excase:
                    self.embedding = np.delete(
                        self.embedding, pd.Index(excase).get_loc(i), axis=0)
                tabC = tabC.copy()
                tabC.drop(excase, axis=0, inplace=True)
            self.labels_hierarchy = tabC.T.sum().tolist()
            self.palette_hierarchy = tabC.columns.tolist()
        else:
            print('threshold too small')
def get_cluster_info(self,
labels=False,
cluster=False,
genes=10,
plot_genes=True):
self.style()
cl = pd.DataFrame({'cluster': labels}, index=self.normal_cells)
if cluster is 'all':
return cl
elif type(cluster) is int:
cl_n = cl.loc[cl['cluster'] == cluster].index.tolist()
cl[cl['cluster'] != cluster] = 200000
cl[cl['cluster'] == cluster] = 200001
print('The cluster', cluster, 'has', len(cl_n), 'cells')
self.plot(labels=cl['cluster'].tolist(),
palette=['#d8dcd6', '#49759c'])
print('The top', genes,
'highly expressed signal-like genes in this cluster are:')
top = self.X3.loc[self.normal_cells, self.normal_genes].loc[cl_n, :].mean()\
.sort_values(ascending=False)[:genes].index.tolist()
for i in top:
print(i)
if plot_genes:
self.plot(gene=tuple(top))
else:
print('Select right cluster option')
    def get_gene_info(self,
                      labels=False,
                      gene=False,
                      path=False,
                      legend=False,
                      size=6.5):
        """Plot per-cluster expression of `gene` as a violin plot and a
        ridge (stacked KDE) plot.

        labels -- per-cell cluster labels aligned with self.normal_cells
        gene -- gene name or list of gene names (list values are averaged)
        path -- optional save path; '_violin'/'_ridge' suffixes are inserted
        legend -- optional list of display names, one per cluster
        size -- figure width for the violin plot
        """
        self.style()
        # sentinel labels 0 / 200001 indicate labels produced outside the
        # hierarchy pipeline, so fall back to the generic palette
        if 0 in labels or 200001 in labels:
            palette = pallete_50
        else:
            palette = self.palette_hierarchy
        cl = pd.DataFrame({'cluster': labels}, index=self.normal_cells)
        # mean expression of the requested gene(s), grouped per cluster
        cl2 = [self.X3[gene].T.mean()[cl.loc[cl['cluster'] == i].index]
               for i in pd.unique(labels)]
        if type(legend) is list:
            order = legend
            index_lab = legend
        else:
            order = sorted(pd.unique(labels))
            index_lab = pd.unique(labels)
        cl2 = pd.DataFrame(cl2, index=index_lab)
        fig = plt.figure(figsize=[size, 5])
        sns.violinplot(data=cl2.T, orient='v', inner=None, order=order,
                       palette=palette,linewidth=0.5)
        sns.stripplot(data=cl2.T, color="black", size=1.5, order=order)
        if legend:
            plt.xticks(rotation=45)
        plt.xlabel('Cluster')
        plt.ylabel('log2(1+TPM)')
        if path:
            plt.savefig(os.path.splitext(path)[
                        0]+'_violin'+os.path.splitext(path)[1], bbox_inches='tight')
        # ------Fig2: ridge plot ------------
        cl['gene'] = self.X3[gene].T.mean()
        palette2 = []
        pal_list = cl.copy()
        # drop clusters with ~zero variance (a KDE cannot be drawn for them)
        # while keeping the palette aligned with the surviving clusters
        for i in sorted(pd.unique(cl['cluster'])):
            if cl[cl['cluster'] == i]['gene'].var() < 0.0000000001:
                cl.drop(cl[cl['cluster'] == i].index, axis=0, inplace=True)
            else:
                palette2.append(
                    palette[sorted(pd.unique(pal_list['cluster'])).index(i)])
        if len(palette2) > 0:
            palette = palette2
        sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
        g = sns.FacetGrid(cl, row="cluster", hue="cluster", aspect=8.5,
                          height=.7, palette=palette)
        g.map(sns.kdeplot, "gene", clip_on=False,
              shade=True, alpha=1, lw=1.5, bw=.13)
        g.map(plt.axhline, y=0, lw=2, clip_on=False)
        down, up = plt.ylim()
        # label each row with the cluster (or its legend name), colored to
        # match its KDE fill
        for i in g.row_names:
            if type(legend) is list:
                mi = legend[g.row_names.index(i)]
                g.axes[g.row_names.index(i),0].set_ylabel(mi,y=down*1.2,
                                                          labelpad=20,
                                                          color=palette[g.row_names.index(i)],
                                                          fontsize='medium',rotation=0)
            else:
                mi = i
                g.axes[g.row_names.index(i),0].set_ylabel('cluster '+str(mi),y=down*1.2,
                                                          labelpad=20,
                                                          color=palette[g.row_names.index(i)],
                                                          fontsize='medium',rotation=0)
        # empirical spacing: tighter overlap for more rows
        if len(palette) < 11 and len(palette) > 5:
            g.fig.subplots_adjust(hspace=-0.7, top=0.70, bottom=0.05)
        elif len(palette) < 6:
            g.fig.subplots_adjust(hspace=-0.6, top=0.85, bottom=0.05)
        else:
            g.fig.subplots_adjust(hspace=-0.7, top=0.50, bottom=0.05)
        g.set_titles("")
        g.set(yticks=[])
        g.despine(bottom=True, left=True)
        plt.xlabel('log2(1+TPM)')
        if path:
            plt.savefig(os.path.splitext(path)[0]+'_ridge'+os.path.splitext(path)[1],
                        bbox_inches='tight')
        self.style()
plt.show() | 0.840292 | 0.63484 |
import logging
from programmingtheiot.common.IDataMessageListener import IDataMessageListener
from programmingtheiot.data.ActuatorData import ActuatorData
from programmingtheiot.cda.sim.HumidifierActuatorSimTask import HumidifierActuatorSimTask
from programmingtheiot.cda.sim.HvacActuatorSimTask import HvacActuatorSimTask
class ActuatorAdapterManager(object):
    """
    Manages every actuator on the device.

    Depending on the `useEmulator` constructor flag, actuation is delegated
    either to emulator task classes (loaded lazily by module path) or to the
    simulator task classes imported at the top of this module.
    """

    # class-level defaults; instances overwrite these in __init__ / setters
    useEmulator = None
    dataMsgListener = None

    def __init__(self, useEmulator: bool = False):
        """
        @param useEmulator: True selects the emulated actuator tasks,
        False selects the simulator tasks.
        """
        self.useEmulator = useEmulator
        if self.useEmulator:
            logging.info("Emulators will be used")
            # Emulator modules are imported lazily so their dependencies are
            # only required when emulation is actually enabled.
            humidifierModule = __import__('programmingtheiot.cda.emulated.HumidifierEmulatorTask', fromlist = ['HumidifierEmulatorTask'])
            self.humidifierEmulator = getattr(humidifierModule, 'HumidifierEmulatorTask')()
            hvacModule = __import__('programmingtheiot.cda.emulated.HvacEmulatorTask', fromlist = ['HvacEmulatorTask'])
            self.hvacEmulator = getattr(hvacModule, 'HvacEmulatorTask')()
            ledDisplayModule = __import__('programmingtheiot.cda.emulated.LedDisplayEmulatorTask', fromlist = ['LedDisplayEmulatorTask'])
            self.LedDisplayEmulator = getattr(ledDisplayModule, 'LedDisplayEmulatorTask')()
        else:
            logging.info("Testing ActuatorAdapterManager class [using simulators]...")
            # create the humidifier actuator
            self.humidifierActuator = HumidifierActuatorSimTask()
            # create the HVAC actuator
            self.hvacActuator = HvacActuatorSimTask()

    def sendActuatorCommand(self, data: ActuatorData) -> bool:
        """
        Route an actuation command to the matching device handler.

        @param data: ActuatorData carrying the target type, command and value.
        @return True when the actuator was switched ON, False when it was
        switched OFF or the actuator type is not recognized.
        """
        logging.info("Actuator command received. Processing...")
        if not self.useEmulator:
            if data.type == data.HUMIDIFIER_ACTUATOR_TYPE:
                if data.getCommand() == 0:
                    logging.info("Emulating HUMIDIFIER actuator OFF:")
                    logging.info("---------------------------------------")
                    return False
                logging.info("Emulating HUMIDIFIER actuator ON:")
                logging.info(" Humidifier value : %s", data.getValue())
                return True
            if data.type == data.HVAC_ACTUATOR_TYPE:
                if data.getCommand() == 0:
                    logging.info("Emulating HVAC actuator OFF:")
                    logging.info("---------------------------------------")
                    return False
                logging.info("Emulating HVAC actuator ON:")
                logging.info(" HVAC value : %s", data.getValue())
                return True
        else:
            if data.type == data.HUMIDIFIER_ACTUATOR_TYPE:
                if data.getCommand() == 0:
                    logging.info("Emulating HUMIDIFIER actuator OFF:")
                    logging.info("---------------------------------------")
                    return False
                logging.info("Emulating HUMIDIFIER actuator ON:")
                logging.info(" Humidifier value : %s", data.getValue())
                self.humidifierEmulator._handleActuation(data.getCommand(), data.getValue())
                return True
            if data.type == data.HVAC_ACTUATOR_TYPE:
                if data.getCommand() == 0:
                    logging.info("Emulating HVAC actuator OFF:")
                    logging.info("---------------------------------------")
                    return False
                logging.info("Emulating HVAC actuator ON:")
                logging.info(" HVAC value : %s", data.getValue())
                self.hvacEmulator._handleActuation(data.getCommand(), data.getValue())
                return True
        # BUG FIX: previously fell through and implicitly returned None for
        # unknown actuator types despite the declared bool return type.
        logging.warning("Unknown actuator type: %s. Ignoring command.", data.type)
        return False

    def setDataMessageListener(self, listener: IDataMessageListener) -> bool:
        """
        Register the data message listener callback.

        @return True when a listener was stored, False when `listener` is None.
        BUG FIX: previously returned None despite the declared bool type.
        """
        if listener is not None:
            self.dataMsgListener = listener
            return True
        # preserve the legacy "no listener" sentinel
        self.dataMsgListener = False
        return False
pass | src/main/python/programmingtheiot/cda/system/ActuatorAdapterManager.py |
import logging
from programmingtheiot.common.IDataMessageListener import IDataMessageListener
from programmingtheiot.data.ActuatorData import ActuatorData
from programmingtheiot.cda.sim.HumidifierActuatorSimTask import HumidifierActuatorSimTask
from programmingtheiot.cda.sim.HvacActuatorSimTask import HvacActuatorSimTask
class ActuatorAdapterManager(object):
    """
    Manages every actuator on the device.

    Depending on the `useEmulator` constructor flag, actuation is delegated
    either to emulator task classes (loaded lazily by module path) or to the
    simulator task classes imported at the top of this module.
    """

    # class-level defaults; instances overwrite these in __init__ / setters
    useEmulator = None
    dataMsgListener = None

    def __init__(self, useEmulator: bool = False):
        """
        @param useEmulator: True selects the emulated actuator tasks,
        False selects the simulator tasks.
        """
        self.useEmulator = useEmulator
        if self.useEmulator:
            logging.info("Emulators will be used")
            # Emulator modules are imported lazily so their dependencies are
            # only required when emulation is actually enabled.
            humidifierModule = __import__('programmingtheiot.cda.emulated.HumidifierEmulatorTask', fromlist = ['HumidifierEmulatorTask'])
            self.humidifierEmulator = getattr(humidifierModule, 'HumidifierEmulatorTask')()
            hvacModule = __import__('programmingtheiot.cda.emulated.HvacEmulatorTask', fromlist = ['HvacEmulatorTask'])
            self.hvacEmulator = getattr(hvacModule, 'HvacEmulatorTask')()
            ledDisplayModule = __import__('programmingtheiot.cda.emulated.LedDisplayEmulatorTask', fromlist = ['LedDisplayEmulatorTask'])
            self.LedDisplayEmulator = getattr(ledDisplayModule, 'LedDisplayEmulatorTask')()
        else:
            logging.info("Testing ActuatorAdapterManager class [using simulators]...")
            # create the humidifier actuator
            self.humidifierActuator = HumidifierActuatorSimTask()
            # create the HVAC actuator
            self.hvacActuator = HvacActuatorSimTask()

    def sendActuatorCommand(self, data: ActuatorData) -> bool:
        """
        Route an actuation command to the matching device handler.

        @param data: ActuatorData carrying the target type, command and value.
        @return True when the actuator was switched ON, False when it was
        switched OFF or the actuator type is not recognized.
        """
        logging.info("Actuator command received. Processing...")
        if not self.useEmulator:
            if data.type == data.HUMIDIFIER_ACTUATOR_TYPE:
                if data.getCommand() == 0:
                    logging.info("Emulating HUMIDIFIER actuator OFF:")
                    logging.info("---------------------------------------")
                    return False
                logging.info("Emulating HUMIDIFIER actuator ON:")
                logging.info(" Humidifier value : %s", data.getValue())
                return True
            if data.type == data.HVAC_ACTUATOR_TYPE:
                if data.getCommand() == 0:
                    logging.info("Emulating HVAC actuator OFF:")
                    logging.info("---------------------------------------")
                    return False
                logging.info("Emulating HVAC actuator ON:")
                logging.info(" HVAC value : %s", data.getValue())
                return True
        else:
            if data.type == data.HUMIDIFIER_ACTUATOR_TYPE:
                if data.getCommand() == 0:
                    logging.info("Emulating HUMIDIFIER actuator OFF:")
                    logging.info("---------------------------------------")
                    return False
                logging.info("Emulating HUMIDIFIER actuator ON:")
                logging.info(" Humidifier value : %s", data.getValue())
                self.humidifierEmulator._handleActuation(data.getCommand(), data.getValue())
                return True
            if data.type == data.HVAC_ACTUATOR_TYPE:
                if data.getCommand() == 0:
                    logging.info("Emulating HVAC actuator OFF:")
                    logging.info("---------------------------------------")
                    return False
                logging.info("Emulating HVAC actuator ON:")
                logging.info(" HVAC value : %s", data.getValue())
                self.hvacEmulator._handleActuation(data.getCommand(), data.getValue())
                return True
        # BUG FIX: previously fell through and implicitly returned None for
        # unknown actuator types despite the declared bool return type.
        logging.warning("Unknown actuator type: %s. Ignoring command.", data.type)
        return False

    def setDataMessageListener(self, listener: IDataMessageListener) -> bool:
        """
        Register the data message listener callback.

        @return True when a listener was stored, False when `listener` is None.
        BUG FIX: previously returned None despite the declared bool type.
        """
        if listener is not None:
            self.dataMsgListener = listener
            return True
        # preserve the legacy "no listener" sentinel
        self.dataMsgListener = False
        return False
pass | 0.201892 | 0.192558 |
import json
from abc import ABC, abstractmethod
from argparse import ArgumentParser
from logging import Logger
from typing import Dict, Any
from pyspark.sql import SparkSession
import sys
# abstract class for jobs
class Job(ABC):
    """Abstract base class for jobs: wires up Spark, logging, dbutils and
    configuration loading; subclasses implement `launch`."""

    def __init__(self, spark=None, init_conf=None):
        """
        :param spark: existing SparkSession, or None to create/get one
        :param init_conf: configuration dict; when None the config is read
            from the --conf-file command-line option
        """
        self.spark = self._prepare_spark(spark)
        self.logger = self._prepare_logger()
        self.dbutils = self.get_dbutils()
        if init_conf:
            self.conf = init_conf
        else:
            self.conf = self._provide_config()
        self._log_conf()

    @staticmethod
    def _prepare_spark(spark) -> SparkSession:
        """Return the given session, or create/get the default one lazily."""
        if not spark:
            return SparkSession.builder.getOrCreate()
        return spark

    @staticmethod
    def _get_dbutils(spark: SparkSession):
        """Return a DBUtils handle, or None outside a Databricks runtime."""
        try:
            from pyspark.dbutils import DBUtils  # noqa
            # BUG FIX: the previous `"dbutils" not in locals()` guard was
            # always true here (only `spark` and `DBUtils` are local), so the
            # alternate branch was dead code. Construct DBUtils directly.
            return DBUtils(spark)
        except ImportError:
            return None

    def get_dbutils(self):
        """Resolve DBUtils and log whether it is available in this runtime."""
        utils = self._get_dbutils(self.spark)
        if not utils:
            self.logger.warn("No DBUtils defined in the runtime")
        else:
            self.logger.info("DBUtils class initialized")
        return utils

    def _provide_config(self):
        """Load the job configuration from --conf-file, or an empty dict."""
        self.logger.info("Reading configuration from --conf-file job option")
        conf_file = self._get_conf_file()
        if not conf_file:
            self.logger.info(
                "No conf file was provided, setting configuration to empty dict."
                "Please override configuration in subclass init method"
            )
            return {}
        self.logger.info(
            f"Conf file was provided, reading configuration from {conf_file}"
        )
        return self._read_config(conf_file)

    @staticmethod
    def _get_conf_file():
        """Parse only the --conf-file option, ignoring other CLI arguments."""
        p = ArgumentParser()
        p.add_argument("--conf-file", required=False, type=str)
        namespace = p.parse_known_args(sys.argv[1:])[0]
        return namespace.conf_file

    def _read_config(self, conf_file) -> Dict[str, Any]:
        """Read a JSON config file through Spark (works for DBFS paths too)."""
        raw_content = "".join(
            self.spark.read.format("text").load(conf_file).toPandas()["value"].tolist()
        )
        return json.loads(raw_content)

    def _prepare_logger(self) -> Logger:
        """Return a log4j logger named after the concrete job class."""
        log4j_logger = self.spark._jvm.org.apache.log4j  # noqa
        return log4j_logger.LogManager.getLogger(self.__class__.__name__)

    def _log_conf(self):
        """Log every configuration key/value pair at job start."""
        self.logger.info("Launching job with configuration parameters:")
        for key, item in self.conf.items():
            self.logger.info("\t Parameter: %-30s with value => %-30s" % (key, item))

    @abstractmethod
    def launch(self):
        """
        Main method of the job; must be implemented by subclasses.
        :return:
        """
pass | code/common.py | import json
from abc import ABC, abstractmethod
from argparse import ArgumentParser
from logging import Logger
from typing import Dict, Any
from pyspark.sql import SparkSession
import sys
# abstract class for jobs
class Job(ABC):
    """Abstract base class for jobs: wires up Spark, logging, dbutils and
    configuration loading; subclasses implement `launch`."""

    def __init__(self, spark=None, init_conf=None):
        """
        :param spark: existing SparkSession, or None to create/get one
        :param init_conf: configuration dict; when None the config is read
            from the --conf-file command-line option
        """
        self.spark = self._prepare_spark(spark)
        self.logger = self._prepare_logger()
        self.dbutils = self.get_dbutils()
        if init_conf:
            self.conf = init_conf
        else:
            self.conf = self._provide_config()
        self._log_conf()

    @staticmethod
    def _prepare_spark(spark) -> SparkSession:
        """Return the given session, or create/get the default one lazily."""
        if not spark:
            return SparkSession.builder.getOrCreate()
        return spark

    @staticmethod
    def _get_dbutils(spark: SparkSession):
        """Return a DBUtils handle, or None outside a Databricks runtime."""
        try:
            from pyspark.dbutils import DBUtils  # noqa
            # BUG FIX: the previous `"dbutils" not in locals()` guard was
            # always true here (only `spark` and `DBUtils` are local), so the
            # alternate branch was dead code. Construct DBUtils directly.
            return DBUtils(spark)
        except ImportError:
            return None

    def get_dbutils(self):
        """Resolve DBUtils and log whether it is available in this runtime."""
        utils = self._get_dbutils(self.spark)
        if not utils:
            self.logger.warn("No DBUtils defined in the runtime")
        else:
            self.logger.info("DBUtils class initialized")
        return utils

    def _provide_config(self):
        """Load the job configuration from --conf-file, or an empty dict."""
        self.logger.info("Reading configuration from --conf-file job option")
        conf_file = self._get_conf_file()
        if not conf_file:
            self.logger.info(
                "No conf file was provided, setting configuration to empty dict."
                "Please override configuration in subclass init method"
            )
            return {}
        self.logger.info(
            f"Conf file was provided, reading configuration from {conf_file}"
        )
        return self._read_config(conf_file)

    @staticmethod
    def _get_conf_file():
        """Parse only the --conf-file option, ignoring other CLI arguments."""
        p = ArgumentParser()
        p.add_argument("--conf-file", required=False, type=str)
        namespace = p.parse_known_args(sys.argv[1:])[0]
        return namespace.conf_file

    def _read_config(self, conf_file) -> Dict[str, Any]:
        """Read a JSON config file through Spark (works for DBFS paths too)."""
        raw_content = "".join(
            self.spark.read.format("text").load(conf_file).toPandas()["value"].tolist()
        )
        return json.loads(raw_content)

    def _prepare_logger(self) -> Logger:
        """Return a log4j logger named after the concrete job class."""
        log4j_logger = self.spark._jvm.org.apache.log4j  # noqa
        return log4j_logger.LogManager.getLogger(self.__class__.__name__)

    def _log_conf(self):
        """Log every configuration key/value pair at job start."""
        self.logger.info("Launching job with configuration parameters:")
        for key, item in self.conf.items():
            self.logger.info("\t Parameter: %-30s with value => %-30s" % (key, item))

    @abstractmethod
    def launch(self):
        """
        Main method of the job; must be implemented by subclasses.
        :return:
        """
pass | 0.489015 | 0.069164 |
import csv
import json
import urllib
# starting static list of universities and their locations to reduce Google Places API call frequency
univ_locations = {'Texas A&M University Corpus Christi': 'us-tx', 'University of Texas at Arlington': 'us-tx', 'University of Houston': 'us-tx',
'Michigan Technological University': 'us-mi', 'Rice University': 'us-tx', 'University of Connecticut': 'us-ct', 'University of Florida': 'us-fl',
'Stanford University': 'us-ca', 'Texas A&M University': 'us-tx', 'University of North Texas': 'us-tx', 'University of Texas Medical Branch': 'us-tx',
'University of North Carolina, Chapel Hill': 'us-nc', 'Drake University': 'us-ia', 'Brown University': 'us-ri', 'University of California, Berkeley': 'us-ca',
'University of Texas at El Paso': 'us-tx', 'University of Texas at Dallas': 'us-tx', 'California Institute of Technology': 'us-ca',
'Clarkson University': 'us-ny', 'University of California, Los Angeles': 'us-ca', 'University of Memphis': 'us-tn',
'College of Physicians & Surgeons, Columbia University': 'us-tx', 'University of California, Santa Barbara': 'us-ca',
'Massachusetts Institute of Technology': 'us-ma', 'University of Texas Health Science Center at Houston': 'us-tx', 'Los Alamos National Laboratory': 'us-nm',
'Texas Tech University': 'us-tx', 'Texas A&M University-Kingsville': 'us-tx', 'Arizona State University': 'us-az', 'Vanderbilt University': 'us-tn',
'The Pennsylvania University': 'us-pa', 'North Carolina State University at Raleigh': 'us-nc', 'University of Texas Southwestern Medical Center': 'us-tx',
'University of Pittsburgh': 'us-pa', 'University of Michigan': 'us-mi', 'University of Pennsylvania': 'us-pa', 'National Renewable Energy Laboratory': 'us-co',
'University of California-Riverside': 'us-ca', 'University of California, Santa Cruz': 'us-ca', 'Vassar College': 'us-ny', 'City University of New York': 'us-ny',
'California State University-Northridge': 'us-ca', 'University of Virginia': 'us-va', 'Missouri University of Science and Technology': 'us-mo',
'Institute for Computational Engineering and Science': 'us-tx', '<NAME>. and <NAME> School of Geosciences': 'us-tx', 'Princeton University': 'us-nj',
'University of Iowa': 'us-ia', 'University of Kentucky': 'us-ky', 'Texas Tech University Health Sciences Center, El Paso': 'us-tx', 'Fox Chase Cancer Center': 'us-pa',
'Old Dominion University': 'us-va', 'Brooklyn College': 'us-ny', 'Texas Advanced Computing Center': 'us-tx', 'Yale University': 'us-ct',
'University of California, Davis': 'us-ca', 'University of Central Florida': 'us-fl', 'UTHealth': 'us-tx', 'Louisiana State University': 'us-la',
'Columbia University': 'us-ny', 'Binghamton University': 'us-ny', 'Oregon State University': 'us-or', 'University of Oklahoma': 'us-ok',
'University of Washington': 'us-wa', 'Purdue University': 'us-in', 'Institute for Fusion Studies': 'us-tx', 'Oregon Health and Science University': 'us-or',
'University of Notre Dame': 'us-in', 'Department of Economics': 'us-tx', 'University of Missouri': 'us-mo', 'Department of Electrical and Computer Engineering': 'us-tx',
'New Mexico Institute of Mining and Technology': 'us-nm', 'Harvard University': 'us-ma', 'SUNY at Binghamton': 'us-ny', 'Offshore Technology Research Center': 'us-tx',
'University of Texas M. D. Anderson Cancer Center': 'us-tx', 'University of California, San Diego': 'us-ca', 'Boston College': 'us-ma', 'Northwestern University': 'us-il',
'Department of Mechanical Engineering': 'us-tx', 'University of Maryland, College Park': 'us-md', 'College of Engineering': 'us-tx', 'Brandeis University': 'us-ma',
'Salk Institute for Biological Studies': 'us-ca', 'University of Colorado': 'us-co', 'University of Wisconsin': 'us-wi', 'University of Delaware': 'us-de',
'Georgia Institute of Technology': 'us-ga', 'Department of Aerospace Engineering and Engineering Mechanics': 'us-tx', 'New Mexico State University': 'us-nm',
'University of Chicago': 'us-il', 'College of Charleston': 'us-sc', 'California State University Northridge': 'us-ca','University of Texas at Austin': 'us-tx',
'Pennsylvania State University': 'us-pa', 'Tufts University': 'us-ma', 'Department of Chemical Engineering': 'us-tx', 'Rochester Institute of Technology': 'us-ny',
'South Dakota State University': 'us-sd', 'George Mason University': 'us-va', 'University of Alabama, Huntsville': 'us-al', 'University of Georgia': 'us-ga',
'University of Nevada-Las Vegas': 'us-nv', 'Washington University in St. Louis': 'us-wa', 'University of Texas at San Antonio': 'us-tx', 'Iowa State University': 'us-ia',
'University of California, Irvine': 'us-ca', 'University of Nebraska, Omaha': 'us-ne', 'Indiana University': 'us-in', 'University of Illinois': 'us-il',
'Johns Hopkins University': 'us-md', 'Bowling Green State University': 'us-oh', 'University of Tennessee, Knoxville': 'us-tn', 'Department of Chemistry and Biochemistry': 'us-tx',
'Jackson State University': 'us-ms', 'Department of Psychology': 'us-tx', 'Department of Physics': 'us-tx', 'Department of Petroleum and Geosystems Engineering': 'us-tx',
'Center for Aeromechanics Research': 'us-tx', 'Center for Energy and Environmental Resources': 'us-tx', 'Center for Research in Water Resources': 'us-tx',
'San Francisco State University': 'us-ca', 'Ohio State University': 'us-oh', 'University of Denver': 'us-co', 'Boston University': 'us-ma', 'Haverford College': 'us-pa',
'Rensselaer Polytechnic Institute': 'us-ny', 'Cornell University': 'us-ny', 'SUNY at Albany': 'us-ny', 'University of Wisconsin-Milwaukee': 'us-wi',
'University of Oregon': 'us-or', 'Carnegie Mellon University': 'us-pa', 'Tennessee Technological University': 'us-tn', 'Rutgers University': 'us-nj',
'University of Nebraska at Lincoln': 'us-ne', 'University of Massachusetts, Amherst': 'us-ma', 'Drexel University': 'us-pa', 'Harvey Mudd College': 'us-ca',
'University of South Carolina': 'us-sc'}
# Makes a new base university_states.csv file from static structure
def refresh_csv():
    """Overwrite university_states.csv from the static univ_locations table."""
    write_univ_states_csv(univ_locations, True)
# Returns a parsable list of jobs running on Stampede2 from a certain institution.
def get_jobs_by_institution(target_institution):
    """Return the list of Stampede2 job ids for target_institution.

    Returns a human-readable message string (not a list) when the
    institution does not appear in institution_job_mappings.csv.
    """
    with open('institution_job_mappings.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter = '\t')
        next(csv_reader)  # skip the header row
        for line in csv_reader:
            if (line[0] == target_institution):
                job_string = line[1]
                jobs_list = build_job_array(job_string)
                # NOTE(review): DEBUG is never defined in this module, so a
                # matching institution raises NameError here — confirm where
                # DEBUG was meant to be declared.
                if (DEBUG):
                    print jobs_list
                return jobs_list
        return "That institution is not currently running any jobs on Stampede2."
# Makes an array out of the string holding the list of jobs.
def build_job_array(job_string):
    """Parse a stringified job list like "['a', 'b']" back into a list."""
    trimmed = job_string.lstrip("['").rstrip("']")
    return trimmed.split("', '")
# Returns the number of jobs running on Stampede2 from a certain university.
# Returns the number of jobs running on Stampede2 from a certain university.
def getCount(target_institution):
    """Return how many Stampede2 jobs target_institution is running (0 if none)."""
    jobs = get_jobs_by_institution(target_institution)
    # BUG FIX: get_jobs_by_institution already returns a parsed list on a
    # hit (and a message string otherwise); the old code re-parsed that list
    # through build_job_array(), which crashes (lstrip on a list).
    if isinstance(jobs, list):
        return len(jobs)
    return 0
def rebuild_array():
    """Merge rows from university_states.csv into the univ_locations dict."""
    with open('university_states.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter = '\t')
        # BUG FIX: skip the header row; previously the header was ingested
        # as a fake institution entry ('Institution' -> 'State').
        next(csv_reader)
        for line in csv_reader:
            institution = line[0]
            if institution not in univ_locations:
                univ_locations[institution] = line[1]
def add_new_universities():
    """Resolve states for institutions missing from univ_locations and
    append them to university_states.csv.

    Uses the Google Places "find place from text" endpoint; only keeps
    lookups that returned a usable formatted address.
    """
    rebuild_array()
    university_info = {}
    addresses = []
    # DO NOT EDIT THIS KEY
    API_key = "<KEY>"
    with open('institution_job_mappings.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter = '\t')
        next(csv_reader)  # skip the header row
        for line in csv_reader:
            institution = line[0]
            if not institution in univ_locations.keys():
                # make api call to get university location
                print institution
                keys = institution.split()
                search_string = '%20'.join(keys)
                # print search_string
                request_url = "https://maps.googleapis.com/maps/api/place/findplacefromtext/json?input=" + search_string + "&inputtype=textquery&fields=formatted_address&key=" + API_key
                response = urllib.urlopen(request_url)
                data = json.loads( response.read())
                # print data
                # NOTE(review): requiring > 2 candidates looks odd (>= 1
                # would seem sufficient) — confirm the intended threshold.
                if (len(data['candidates']) > 2):
                    address_components = data['candidates'][0]['formatted_address'].split(', ')
                    addresses.append(address_components)
                    # assumes the state code is the first two characters of
                    # the second-to-last address component — TODO confirm
                    state = address_components[-2][0:2]
                    university_info[institution] = "us-" + state.encode('utf-8').lower()
    write_univ_states_csv(university_info, False)
def write_univ_states_csv(locations_list, initial):
    """Write institution/state rows to university_states.csv.

    locations_list -- dict mapping institution name -> state code
    initial -- True rewrites the file with a header; False appends rows
    """
    # Build one row dict per institution
    univ_states = []
    for institution in locations_list.keys():
        info = {'Institution' : institution, 'State' : locations_list[institution]}
        univ_states.append(info)
    if (len(univ_states) > 0):
        # BUG FIX: both branches must use the same sorted row fieldnames;
        # the append branch previously passed the source dict's keys()
        # (institution names!) as DictWriter fieldnames, which makes
        # writerows() fail on every row.
        list_keys = sorted(univ_states[0].keys())
        if (initial):
            with open('university_states.csv', 'w') as output_file:
                dict_writer = csv.DictWriter( output_file, list_keys, delimiter = '\t' )
                dict_writer.writeheader()
                dict_writer.writerows( univ_states )
        else:
            with open('university_states.csv', 'a') as output_file:
                dict_writer = csv.DictWriter( output_file, list_keys, delimiter = '\t' )
                dict_writer.writerows( univ_states )
        write_state_count_csv()
def write_state_count_csv():
    """Tally how many universities fall in each state and write the totals
    to state_count.csv (tab-separated, State then Count columns)."""
    counts = {}
    with open('university_states.csv') as src:
        reader = csv.reader(src, delimiter = '\t')
        next(reader)  # skip the header row
        for row in reader:
            counts[row[1]] = counts.get(row[1], 0) + 1
    rows = [{'State' : state, 'Count' : counts[state]}
            for state in sorted(counts)]
    with open('state_count.csv', 'w') as dst:
        # reverse-sorted dict keys yield the column order ['State', 'Count']
        fieldnames = sorted(rows[0].keys(), reverse = True)
        writer = csv.DictWriter( dst, fieldnames, delimiter = '\t' )
        writer.writeheader()
        writer.writerows( rows )
def main():
    # Rebuild the baseline CSV from the static univ_locations table, then
    # resolve and append any new institutions via the Places API.
    refresh_csv()
    add_new_universities()
main() | project_organizer.py | import csv
import json
import urllib
# starting static list of universities and their locations to reduce Google Places API call frequency
univ_locations = {'Texas A&M University Corpus Christi': 'us-tx', 'University of Texas at Arlington': 'us-tx', 'University of Houston': 'us-tx',
'Michigan Technological University': 'us-mi', 'Rice University': 'us-tx', 'University of Connecticut': 'us-ct', 'University of Florida': 'us-fl',
'Stanford University': 'us-ca', 'Texas A&M University': 'us-tx', 'University of North Texas': 'us-tx', 'University of Texas Medical Branch': 'us-tx',
'University of North Carolina, Chapel Hill': 'us-nc', 'Drake University': 'us-ia', 'Brown University': 'us-ri', 'University of California, Berkeley': 'us-ca',
'University of Texas at El Paso': 'us-tx', 'University of Texas at Dallas': 'us-tx', 'California Institute of Technology': 'us-ca',
'Clarkson University': 'us-ny', 'University of California, Los Angeles': 'us-ca', 'University of Memphis': 'us-tn',
'College of Physicians & Surgeons, Columbia University': 'us-tx', 'University of California, Santa Barbara': 'us-ca',
'Massachusetts Institute of Technology': 'us-ma', 'University of Texas Health Science Center at Houston': 'us-tx', 'Los Alamos National Laboratory': 'us-nm',
'Texas Tech University': 'us-tx', 'Texas A&M University-Kingsville': 'us-tx', 'Arizona State University': 'us-az', 'Vanderbilt University': 'us-tn',
'The Pennsylvania University': 'us-pa', 'North Carolina State University at Raleigh': 'us-nc', 'University of Texas Southwestern Medical Center': 'us-tx',
'University of Pittsburgh': 'us-pa', 'University of Michigan': 'us-mi', 'University of Pennsylvania': 'us-pa', 'National Renewable Energy Laboratory': 'us-co',
'University of California-Riverside': 'us-ca', 'University of California, Santa Cruz': 'us-ca', 'Vassar College': 'us-ny', 'City University of New York': 'us-ny',
'California State University-Northridge': 'us-ca', 'University of Virginia': 'us-va', 'Missouri University of Science and Technology': 'us-mo',
'Institute for Computational Engineering and Science': 'us-tx', '<NAME>. and <NAME> School of Geosciences': 'us-tx', 'Princeton University': 'us-nj',
'University of Iowa': 'us-ia', 'University of Kentucky': 'us-ky', 'Texas Tech University Health Sciences Center, El Paso': 'us-tx', 'Fox Chase Cancer Center': 'us-pa',
'Old Dominion University': 'us-va', 'Brooklyn College': 'us-ny', 'Texas Advanced Computing Center': 'us-tx', 'Yale University': 'us-ct',
'University of California, Davis': 'us-ca', 'University of Central Florida': 'us-fl', 'UTHealth': 'us-tx', 'Louisiana State University': 'us-la',
'Columbia University': 'us-ny', 'Binghamton University': 'us-ny', 'Oregon State University': 'us-or', 'University of Oklahoma': 'us-ok',
'University of Washington': 'us-wa', 'Purdue University': 'us-in', 'Institute for Fusion Studies': 'us-tx', 'Oregon Health and Science University': 'us-or',
'University of Notre Dame': 'us-in', 'Department of Economics': 'us-tx', 'University of Missouri': 'us-mo', 'Department of Electrical and Computer Engineering': 'us-tx',
'New Mexico Institute of Mining and Technology': 'us-nm', 'Harvard University': 'us-ma', 'SUNY at Binghamton': 'us-ny', 'Offshore Technology Research Center': 'us-tx',
'University of Texas M. D. Anderson Cancer Center': 'us-tx', 'University of California, San Diego': 'us-ca', 'Boston College': 'us-ma', 'Northwestern University': 'us-il',
'Department of Mechanical Engineering': 'us-tx', 'University of Maryland, College Park': 'us-md', 'College of Engineering': 'us-tx', 'Brandeis University': 'us-ma',
'Salk Institute for Biological Studies': 'us-ca', 'University of Colorado': 'us-co', 'University of Wisconsin': 'us-wi', 'University of Delaware': 'us-de',
'Georgia Institute of Technology': 'us-ga', 'Department of Aerospace Engineering and Engineering Mechanics': 'us-tx', 'New Mexico State University': 'us-nm',
'University of Chicago': 'us-il', 'College of Charleston': 'us-sc', 'California State University Northridge': 'us-ca','University of Texas at Austin': 'us-tx',
'Pennsylvania State University': 'us-pa', 'Tufts University': 'us-ma', 'Department of Chemical Engineering': 'us-tx', 'Rochester Institute of Technology': 'us-ny',
'South Dakota State University': 'us-sd', 'George Mason University': 'us-va', 'University of Alabama, Huntsville': 'us-al', 'University of Georgia': 'us-ga',
'University of Nevada-Las Vegas': 'us-nv', 'Washington University in St. Louis': 'us-wa', 'University of Texas at San Antonio': 'us-tx', 'Iowa State University': 'us-ia',
'University of California, Irvine': 'us-ca', 'University of Nebraska, Omaha': 'us-ne', 'Indiana University': 'us-in', 'University of Illinois': 'us-il',
'Johns Hopkins University': 'us-md', 'Bowling Green State University': 'us-oh', 'University of Tennessee, Knoxville': 'us-tn', 'Department of Chemistry and Biochemistry': 'us-tx',
'Jackson State University': 'us-ms', 'Department of Psychology': 'us-tx', 'Department of Physics': 'us-tx', 'Department of Petroleum and Geosystems Engineering': 'us-tx',
'Center for Aeromechanics Research': 'us-tx', 'Center for Energy and Environmental Resources': 'us-tx', 'Center for Research in Water Resources': 'us-tx',
'San Francisco State University': 'us-ca', 'Ohio State University': 'us-oh', 'University of Denver': 'us-co', 'Boston University': 'us-ma', 'Haverford College': 'us-pa',
'Rensselaer Polytechnic Institute': 'us-ny', 'Cornell University': 'us-ny', 'SUNY at Albany': 'us-ny', 'University of Wisconsin-Milwaukee': 'us-wi',
'University of Oregon': 'us-or', 'Carnegie Mellon University': 'us-pa', 'Tennessee Technological University': 'us-tn', 'Rutgers University': 'us-nj',
'University of Nebraska at Lincoln': 'us-ne', 'University of Massachusetts, Amherst': 'us-ma', 'Drexel University': 'us-pa', 'Harvey Mudd College': 'us-ca',
'University of South Carolina': 'us-sc'}
# Makes a new base university_states.csv file from static structure
def refresh_csv():
    """Rebuild university_states.csv from the hard-coded univ_locations map.

    Passing True makes write_univ_states_csv overwrite the file (with a
    header) rather than append to it.
    """
    write_univ_states_csv(univ_locations, True)
# Returns a parsable list of jobs running on Stampede2 from a certain institution.
def get_jobs_by_institution(target_institution):
    """Look up *target_institution* in institution_job_mappings.csv.

    Returns:
        list: the parsed job ids for the institution, or
        str: a human-readable message when the institution is not found.
        (The mixed return type is kept for backward compatibility.)
    """
    with open('institution_job_mappings.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter='\t')
        next(csv_reader)  # skip the header row
        for line in csv_reader:
            if line[0] == target_institution:
                job_string = line[1]
                jobs_list = build_job_array(job_string)
                # DEBUG is a module-level flag defined outside this view.
                if DEBUG:
                    # BUG FIX: was a Python-2 print statement, a syntax
                    # error under Python 3.
                    print(jobs_list)
                return jobs_list
    return "That institution is not currently running any jobs on Stampede2."
# Makes an array out of the string holding the list of jobs.
def build_job_array(job_string):
    """Parse a stringified Python list (e.g. "['a', 'b']") into a list of str.

    BUG FIX: str.lstrip/rstrip treat their argument as a *character set*,
    so lstrip("['") / rstrip("']") could also eat quote/bracket characters
    that belong to the first or last job id. Strip the exact delimiters
    one at a time instead.
    """
    s = job_string
    if s.startswith("["):
        s = s[1:]
    if s.endswith("]"):
        s = s[:-1]
    if s.startswith("'"):
        s = s[1:]
    if s.endswith("'"):
        s = s[:-1]
    return s.split("', '")
# Returns the number of jobs running on Stampede2 from a certain university.
def getCount(target_institution):
    """Return how many jobs *target_institution* is running, or 0 if none.

    BUG FIX: get_jobs_by_institution already returns a parsed list, so its
    result must not be fed through build_job_array again -- a list has no
    .lstrip, so the old double-parse crashed for every matching institution.
    Also drops the unused inst_data local.
    """
    with open('institution_job_mappings.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter='\t')
        for line in csv_reader:
            if line[0] == target_institution:
                return len(get_jobs_by_institution(target_institution))
    return 0
def rebuild_array():
    """Merge entries from university_states.csv into the univ_locations map.

    BUG FIX: university_states.csv is written with an Institution/State
    header row (see write_univ_states_csv), and the sibling readers skip it
    with next(); without the skip a bogus {'Institution': 'State'} entry
    was inserted into univ_locations.
    """
    with open('university_states.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter='\t')
        next(csv_reader)  # skip header row
        for line in csv_reader:
            institution = line[0]
            if institution not in univ_locations:
                univ_locations[institution] = line[1]
def add_new_universities():
    """Resolve the home state of institutions not yet in univ_locations.

    Queries the Google Places "find place from text" endpoint once per
    unknown institution and appends the new mappings to
    university_states.csv. Requires a valid API key and network access.
    """
    # Local imports keep this Python-3 compatible even if the module top
    # still imports the old flat `urllib`.
    import json
    import urllib.request  # BUG FIX: py2 urllib.urlopen no longer exists
    rebuild_array()
    university_info = {}
    # DO NOT EDIT THIS KEY
    API_key = "<KEY>"
    with open('institution_job_mappings.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter='\t')
        next(csv_reader)  # skip header
        for line in csv_reader:
            institution = line[0]
            if institution not in univ_locations:
                # make api call to get univerity location
                print(institution)  # BUG FIX: was a py2 print statement
                search_string = '%20'.join(institution.split())  # encode spaces
                request_url = "https://maps.googleapis.com/maps/api/place/findplacefromtext/json?input=" + search_string + "&inputtype=textquery&fields=formatted_address&key=" + API_key
                response = urllib.request.urlopen(request_url)
                data = json.loads(response.read())
                # NOTE(review): '> 2' looks odd -- presumably any candidate
                # (> 0) should do; kept as-is to preserve behavior.
                if (len(data['candidates']) > 2):
                    address_components = data['candidates'][0]['formatted_address'].split(', ')
                    # formatted_address ends "..., ST ZIP, USA"; take the
                    # two-letter state abbreviation.
                    state = address_components[-2][0:2]
                    # BUG FIX: dropped .encode('utf-8') -- under py3 it
                    # produced bytes and "us-" + bytes raised TypeError.
                    university_info[institution] = "us-" + state.lower()
    write_univ_states_csv(university_info, False)
def write_univ_states_csv(locations_list, initial):
    """Write {institution: state} mappings to university_states.csv.

    Args:
        locations_list: dict mapping institution name -> 'us-xx' state code.
        initial: True rewrites the file with a header row; False appends.

    Afterwards state_count.csv is regenerated unconditionally.
    """
    univ_states = [{'Institution': institution, 'State': locations_list[institution]}
                   for institution in locations_list]
    # Write info out to csv
    if len(univ_states) > 0:
        # Sorted field names -> ['Institution', 'State'] column order.
        list_keys = sorted(univ_states[0].keys())
        if initial:
            with open('university_states.csv', 'w') as output_file:
                dict_writer = csv.DictWriter(output_file, list_keys, delimiter='\t')
                dict_writer.writeheader()
                dict_writer.writerows(univ_states)
        else:
            # BUG FIX: the append branch used the *institution names*
            # (locations_list.keys()) as DictWriter fieldnames, which makes
            # writerows raise ValueError; use the same column names as the
            # initial branch.
            with open('university_states.csv', 'a') as output_file:
                dict_writer = csv.DictWriter(output_file, list_keys, delimiter='\t')
                dict_writer.writerows(univ_states)
    write_state_count_csv()
def write_state_count_csv():
    """Tally universities per state and write state_count.csv (tab-separated).

    Reads university_states.csv (skipping its header) and writes one
    State/Count row per state, sorted by state code.
    """
    state_count = {}
    with open('university_states.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter='\t')
        next(csv_reader)  # skip header
        for line in csv_reader:
            state = line[1]
            state_count[state] = state_count.get(state, 0) + 1
    state_count_array = [{'State': state, 'Count': state_count[state]}
                         for state in sorted(state_count)]
    # ROBUSTNESS FIX: the old code indexed state_count_array[0] even when
    # the input file held no data rows, raising IndexError.
    if not state_count_array:
        return
    with open('state_count.csv', 'w') as output_file:
        # reverse-sorted -> ['State', 'Count'] column order
        keys = sorted(state_count_array[0].keys(), reverse=True)
        dict_writer = csv.DictWriter(output_file, keys, delimiter='\t')
        dict_writer.writeheader()
        dict_writer.writerows(state_count_array)
def main():
    """Rebuild the base CSV, then resolve and append any new institutions."""
    refresh_csv()
    add_new_universities()
# Only run the pipeline when executed as a script, not on import.
# (Also strips trailing non-code residue that had been appended to this line.)
if __name__ == "__main__":
    main()
import shutil
import sklearn
import torch
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint, EarlyStopping
from torch.utils.data import DataLoader
from typing import Optional
from slp.util import system
from slp.util import types
# Select the torch device once at import time: GPU when available, else CPU.
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
#DEVICE = 'cpu'
class CheckpointHandler(ModelCheckpoint):
    """Augment ignite ModelCheckpoint Handler with copying the best file to a
    {filename_prefix}_{experiment_name}.best.pth.
    This helps for automatic testing etc.
    Args:
        engine (ignite.engine.Engine): The trainer engine
        to_save (dict): The objects to save
    """
    def __call__(self, engine: Engine, to_save: types.GenericDict) -> None:
        # Let ignite save the checkpoint files as usual first.
        super(CheckpointHandler, self).__call__(engine, to_save)
        # Select model with best loss
        # NOTE(review): relies on ignite's private ModelCheckpoint._saved
        # list and assumes its last entry is the checkpoint just written --
        # confirm against the pinned ignite version.
        _, paths = self._saved[-1]
        for src in paths:
            # Checkpoint files are named '<prefix>_<name>_...'; assumes
            # neither prefix nor name itself contains an underscore.
            splitted = src.split('_')
            fname_prefix = splitted[0]
            name = splitted[1]
            dst = f'{fname_prefix}_{name}.best.pth'
            shutil.copy(src, dst)
class EvaluationHandler(object):
    """Periodic evaluation hook: runs the evaluator, logs its metrics and the
    early-stopping state, then scores the model on the test set."""

    def __init__(self, pbar: Optional[ProgressBar] = None,
                 validate_every: int = 1,
                 early_stopping: Optional[EarlyStopping] = None):
        # When a progress bar is given, log through it so output does not
        # interleave with the bar; otherwise fall back to print.
        self.validate_every = validate_every
        self.print_fn = pbar.log_message if pbar is not None else print
        self.early_stopping = early_stopping

    def predict_testset(self, model, test_loader):
        """Run *model* over *test_loader* and print macro F1 and UAR.

        NOTE(review): batches are consumed as-is; this assumes the loader
        already yields tensors on DEVICE (inputs are not moved here) --
        confirm against the data pipeline.
        """
        model.eval()
        model.to(DEVICE)
        y_pred = []
        y_true = []
        with torch.no_grad():
            for batch in test_loader:
                x, y, lengths = batch
                y_pred.append(model(x, lengths))
                y_true.append(y)
        # Flatten per-batch outputs into flat label lists. Iterating each
        # batch by its own length fixes a crash in the old code, which
        # indexed every batch with range(len(y_pred[0])) and so broke when
        # the final batch was smaller than the first.
        yp = [sample.max(0)[1].item()
              for batch_pred in y_pred for sample in batch_pred]
        yt = [label.item()
              for batch_true in y_true for label in batch_true]
        # BUG FIX: sklearn metrics take (y_true, y_pred); the old calls
        # passed predictions first, which turned UAR (macro recall) into
        # macro precision.
        f1 = sklearn.metrics.f1_score(yt, yp, average='macro')
        uar = sklearn.metrics.recall_score(yt, yp, average='macro')
        print("F1: {}".format(f1))
        print("UAR: {}".format(uar))
        return 1

    def __call__(self, engine: Engine, model, evaluator: Engine,
                 dataloader: DataLoader, test_loader: DataLoader, validation: bool = True):
        """EPOCH_COMPLETED callback: evaluate every `validate_every` epochs."""
        if engine.state.epoch % self.validate_every != 0:
            return
        evaluator.run(dataloader)
        system.print_separator(n=35, print_fn=self.print_fn)
        metrics = evaluator.state.metrics
        phase = 'Validation' if validation else 'Training'
        self.print_fn('Epoch {} {} results'
                      .format(engine.state.epoch, phase))
        system.print_separator(symbol='-', n=35, print_fn=self.print_fn)
        for name, value in metrics.items():
            self.print_fn('{:<15} {:<15}'.format(name, value))
        if validation and self.early_stopping:
            # ignite's EarlyStopping stores the *negated* loss as its best
            # score, hence the minus sign below.
            loss = self.early_stopping.best_score
            patience = self.early_stopping.patience
            cntr = self.early_stopping.counter
            self.print_fn('{:<15} {:<15}'.format('best loss', -loss))
            self.print_fn('{:<15} {:<15}'.format('patience left',
                                                 patience - cntr))
        system.print_separator(n=35, print_fn=self.print_fn)
        self.predict_testset(model, test_loader)

    def attach(self, model, trainer: Engine, evaluator: Engine,
               dataloader: DataLoader, test_loader: DataLoader, validation: bool = True):
        """Register this handler on the trainer's EPOCH_COMPLETED event."""
        trainer.add_event_handler(
            Events.EPOCH_COMPLETED,
            self, model, evaluator, dataloader, test_loader,
            validation=validation)
import sklearn
import torch
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint, EarlyStopping
from torch.utils.data import DataLoader
from typing import Optional
from slp.util import system
from slp.util import types
# Select the torch device once at import time: GPU when available, else CPU.
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
#DEVICE = 'cpu'
class CheckpointHandler(ModelCheckpoint):
    """Augment ignite ModelCheckpoint Handler with copying the best file to a
    {filename_prefix}_{experiment_name}.best.pth.
    This helps for automatic testing etc.
    Args:
        engine (ignite.engine.Engine): The trainer engine
        to_save (dict): The objects to save
    """
    def __call__(self, engine: Engine, to_save: types.GenericDict) -> None:
        # Let ignite save the checkpoint files as usual first.
        super(CheckpointHandler, self).__call__(engine, to_save)
        # Select model with best loss
        # NOTE(review): relies on ignite's private ModelCheckpoint._saved
        # list and assumes its last entry is the checkpoint just written --
        # confirm against the pinned ignite version.
        _, paths = self._saved[-1]
        for src in paths:
            # Checkpoint files are named '<prefix>_<name>_...'; assumes
            # neither prefix nor name itself contains an underscore.
            splitted = src.split('_')
            fname_prefix = splitted[0]
            name = splitted[1]
            dst = f'{fname_prefix}_{name}.best.pth'
            shutil.copy(src, dst)
class EvaluationHandler(object):
    """Periodic evaluation hook: runs the evaluator, logs its metrics and the
    early-stopping state, then scores the model on the test set."""

    def __init__(self, pbar: Optional[ProgressBar] = None,
                 validate_every: int = 1,
                 early_stopping: Optional[EarlyStopping] = None):
        # When a progress bar is given, log through it so output does not
        # interleave with the bar; otherwise fall back to print.
        self.validate_every = validate_every
        self.print_fn = pbar.log_message if pbar is not None else print
        self.early_stopping = early_stopping

    def predict_testset(self, model, test_loader):
        """Run *model* over *test_loader* and print macro F1 and UAR.

        NOTE(review): batches are consumed as-is; this assumes the loader
        already yields tensors on DEVICE (inputs are not moved here) --
        confirm against the data pipeline.
        """
        model.eval()
        model.to(DEVICE)
        y_pred = []
        y_true = []
        with torch.no_grad():
            for batch in test_loader:
                x, y, lengths = batch
                y_pred.append(model(x, lengths))
                y_true.append(y)
        # Flatten per-batch outputs into flat label lists. Iterating each
        # batch by its own length fixes a crash in the old code, which
        # indexed every batch with range(len(y_pred[0])) and so broke when
        # the final batch was smaller than the first.
        yp = [sample.max(0)[1].item()
              for batch_pred in y_pred for sample in batch_pred]
        yt = [label.item()
              for batch_true in y_true for label in batch_true]
        # BUG FIX: sklearn metrics take (y_true, y_pred); the old calls
        # passed predictions first, which turned UAR (macro recall) into
        # macro precision.
        f1 = sklearn.metrics.f1_score(yt, yp, average='macro')
        uar = sklearn.metrics.recall_score(yt, yp, average='macro')
        print("F1: {}".format(f1))
        print("UAR: {}".format(uar))
        return 1

    def __call__(self, engine: Engine, model, evaluator: Engine,
                 dataloader: DataLoader, test_loader: DataLoader, validation: bool = True):
        """EPOCH_COMPLETED callback: evaluate every `validate_every` epochs."""
        if engine.state.epoch % self.validate_every != 0:
            return
        evaluator.run(dataloader)
        system.print_separator(n=35, print_fn=self.print_fn)
        metrics = evaluator.state.metrics
        phase = 'Validation' if validation else 'Training'
        self.print_fn('Epoch {} {} results'
                      .format(engine.state.epoch, phase))
        system.print_separator(symbol='-', n=35, print_fn=self.print_fn)
        for name, value in metrics.items():
            self.print_fn('{:<15} {:<15}'.format(name, value))
        if validation and self.early_stopping:
            # ignite's EarlyStopping stores the *negated* loss as its best
            # score, hence the minus sign below.
            loss = self.early_stopping.best_score
            patience = self.early_stopping.patience
            cntr = self.early_stopping.counter
            self.print_fn('{:<15} {:<15}'.format('best loss', -loss))
            self.print_fn('{:<15} {:<15}'.format('patience left',
                                                 patience - cntr))
        system.print_separator(n=35, print_fn=self.print_fn)
        self.predict_testset(model, test_loader)

    def attach(self, model, trainer: Engine, evaluator: Engine,
               dataloader: DataLoader, test_loader: DataLoader, validation: bool = True):
        """Register this handler on the trainer's EPOCH_COMPLETED event."""
        trainer.add_event_handler(
            Events.EPOCH_COMPLETED,
            self, model, evaluator, dataloader, test_loader,
            validation=validation)
import autograd.numpy as np
from autograd import hessian
from scipy.optimize import minimize
import matplotlib.pyplot as plt
from matplotlib import rcParams, rcParamsDefault
from scipy.optimize import brentq
from autograd import grad
from experiments.visualization.visualization_utils import get_figsize
def func_sine(X):
    """Elementwise sine; the running example function for all plots below."""
    return np.sin(X)
def plot_admissible_region(X0, xlim, ylim, func_to_convex, l_thresh, u_thresh, left_range, right_range):
    """Plot f(x)=sin(x) with the [L, U] band and the shaded admissible region;
    saves the figure to sine_admissible_region.pdf.

    Args:
        X0: reference point x_0 (1-element array).
        xlim, ylim: axis limits.
        func_to_convex: the plotted function f.
        l_thresh, u_thresh: lower/upper thresholds L and U.
        left_range, right_range: x-interval to shade as admissible.

    NOTE(review): mutates global matplotlib rcParams and restores the
    defaults only on the success path (not exception-safe).
    """
    # Embed fonts as TrueType (type 42) for publication-ready PDFs.
    rcParams['pdf.fonttype'] = 42
    rcParams['ps.fonttype'] = 42
    rcParams.update({'legend.fontsize': 5.4})
    rcParams.update({'font.size': 5.8})
    # textwidth is 506.295, columnwidth is 241.14749
    fig = plt.figure(figsize=get_figsize(columnwidth=506.295, wf=0.32, hf=0.42))
    ax = fig.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    # Make data
    X = np.arange(xlim[0], xlim[1], 0.01)
    Y = func_to_convex(X)
    # Plot the sine in the domain
    ax.plot(X, Y, label="$sin(x)$", color="black", linewidth=0.9)
    ax.plot(X0, func_to_convex(X0), 'o', label="$x_0$", color="black", markersize=3)
    # Shade the admissible x-interval and put ticks at its boundaries.
    ax.axvspan(xmin=left_range, xmax=right_range, edgecolor="none", facecolor="grey", alpha=0.25)
    plt.xticks([left_range, right_range])
    # Horizontal threshold lines U (blue) and L (red).
    ax.hlines(y=u_thresh, xmin=xlim[0], xmax=xlim[1], colors='tab:blue', linestyles="--", label='$U$', linewidth=0.9)
    ax.hlines(y=l_thresh, xmin=xlim[0], xmax=xlim[1], colors='tab:red', linestyles="--", label='$L$', linewidth=0.9)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_linewidth(0.5)
    ax.spines['left'].set_linewidth(0.5)
    ax.tick_params(width=0.5)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5, 1.34), ncol=4, columnspacing=1, handlelength=1.7, frameon=False)
    plt.subplots_adjust(top=0.82, bottom=0.18, left=0.09, right=0.96, hspace=0.08, wspace=0.2)
    fig_file_name = "sine_admissible_region.pdf"
    plt.savefig(fig_file_name)
    plt.close(fig)
    # Restore matplotlib defaults for subsequent plots.
    rcParams.update(rcParamsDefault)
def plot_2d_convex_diff(X0, xlim, ylim, func_to_convex, l_thresh, u_thresh, func_g_convex, func_h_convex, func_g_minus_l_tangent,
                        left_range, right_range):
    """Plot f with its convex DC parts g, h, the tangent-minus-L line, and
    the shaded feasibility regions; saves sine_convex_diff.pdf.

    Args:
        X0: reference point x_0.
        xlim, ylim: axis limits.
        func_to_convex: the original function f.
        l_thresh, u_thresh: thresholds L and U.
        func_g_convex, func_h_convex: convex components with f = g - h.
        func_g_minus_l_tangent: tangent of f at x_0 shifted down by L.
        left_range, right_range: safe-zone boundary positions for the ticks.

    NOTE(review): mutates global rcParams; restored only on the success path.
    """
    # TrueType font embedding for publication PDFs.
    rcParams['pdf.fonttype'] = 42
    rcParams['ps.fonttype'] = 42
    rcParams.update({'legend.fontsize': 5.4})
    rcParams.update({'font.size': 5.8})
    # textwidth is 506.295, columnwidth is 241.14749
    fig = plt.figure(figsize=get_figsize(columnwidth=506.295, wf=0.32, hf=0.42))
    ax = fig.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    # Make data
    X = np.arange(xlim[0], xlim[1], 0.01)
    Y = func_to_convex(X)
    # Plot the sine in the domain
    ax.plot(X, Y, color="black", linewidth=0.9)
    ax.plot(X0, func_to_convex(X0), 'o', color="black", markersize=3)
    g_convex_val = func_g_convex(X)
    h_convex_val = func_h_convex(X)
    ax.plot(X, g_convex_val, label=r"$\breve{g}(x)$", color="tab:orange", linewidth=0.9)
    ax.plot(X, h_convex_val, label=r"$\breve{h}(x)$", color="tab:green", linewidth=0.9)
    g_minus_l_tangent_val = func_g_minus_l_tangent(X)
    # Shade where each safe-zone condition holds: g < U (orange) and
    # h < tangent(g) - L (green).
    ax.fill_between(X, g_convex_val, u_thresh, where=g_convex_val < u_thresh, color='orange', alpha=0.5, interpolate=True)
    ax.fill_between(X, h_convex_val, g_minus_l_tangent_val, where=h_convex_val < g_minus_l_tangent_val, color='green', alpha=0.3, interpolate=True)
    # Dotted verticals mark the resulting safe-zone boundary.
    ax.axvline(x=left_range, ymin=-1, ymax=1, linestyle=":", color="grey", linewidth=0.8)
    ax.axvline(x=right_range, ymin=-1, ymax=1, linestyle=":", color="grey", linewidth=0.8)
    plt.xticks([left_range, right_range])
    ax.hlines(y=u_thresh, xmin=xlim[0], xmax=xlim[1], colors='tab:blue', linestyles="--", linewidth=0.9)
    ax.hlines(y=l_thresh, xmin=xlim[0], xmax=xlim[1], colors='tab:red', linestyles="--", linewidth=0.9)
    ax.plot(X, g_minus_l_tangent_val, label=r"$f(x_0)+\nabla f(x_0)^T (x-x_0) - L$", color="grey", linestyle="-.", linewidth=0.9)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_linewidth(0.5)
    ax.spines['left'].set_linewidth(0.5)
    ax.tick_params(width=0.5)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5, 1.36), ncol=3, columnspacing=0.8, handlelength=1.7, frameon=False, handletextpad=0.8)
    plt.subplots_adjust(top=0.82, bottom=0.18, left=0.09, right=0.96, hspace=0.08, wspace=0.2)
    fig_file_name = "sine_convex_diff.pdf"
    plt.savefig(fig_file_name)
    plt.close(fig)
    # Restore matplotlib defaults for subsequent plots.
    rcParams.update(rcParamsDefault)
def plot_2d_concave_diff(X0, xlim, ylim, func_to_concave, l_thresh, u_thresh, func_g_concave, func_h_concave, func_g_minus_u_tangent,
                         left_range, right_range):
    """Plot f with its concave DC parts g, h, the tangent-minus-U line, and
    the shaded feasibility regions; saves sine_concave_diff.pdf.

    Args:
        X0: reference point x_0.
        xlim, ylim: axis limits.
        func_to_concave: the original function f.
        l_thresh, u_thresh: thresholds L and U.
        func_g_concave, func_h_concave: concave components with f = g - h.
        func_g_minus_u_tangent: tangent of f at x_0 shifted down by U.
        left_range, right_range: safe-zone boundary positions for the ticks.

    BUG FIX: the \\hat legend labels were non-raw strings containing the
    invalid escape sequence ``\\h`` (a DeprecationWarning, a SyntaxWarning
    on 3.12, and slated to become an error); they are now raw strings with
    identical runtime content.
    NOTE(review): mutates global rcParams; restored only on the success path.
    """
    # TrueType font embedding for publication PDFs.
    rcParams['pdf.fonttype'] = 42
    rcParams['ps.fonttype'] = 42
    rcParams.update({'legend.fontsize': 5.4})
    rcParams.update({'font.size': 5.8})
    # textwidth is 506.295, columnwidth is 241.14749
    fig = plt.figure(figsize=get_figsize(columnwidth=506.295, wf=0.32, hf=0.42))
    ax = fig.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    # Make data
    X = np.arange(xlim[0], xlim[1], 0.01)
    Y = func_to_concave(X)
    # Plot the sine in the domain
    ax.plot(X, Y, color="black", linewidth=0.9)
    ax.plot(X0, func_to_concave(X0), 'o', color="black", markersize=3)
    g_concave_val = func_g_concave(X)
    h_concave_val = func_h_concave(X)
    ax.plot(X, g_concave_val, label=r"$\hat{g}(x)$", color="tab:green", linewidth=0.9)
    ax.plot(X, h_concave_val, label=r"$\hat{h}(x)$", color="tab:orange", linewidth=0.9)
    g_minus_u_tangent_val = func_g_minus_u_tangent(X)
    # Shade where each safe-zone condition holds: g > L (green) and
    # h > tangent(g) - U (orange).
    ax.fill_between(X, g_concave_val, l_thresh, where=g_concave_val > l_thresh, color='green', alpha=0.5, interpolate=True)
    ax.fill_between(X, h_concave_val, g_minus_u_tangent_val, where=h_concave_val > g_minus_u_tangent_val, color='orange', alpha=0.3, interpolate=True)
    # Dotted verticals mark the resulting safe-zone boundary.
    ax.axvline(x=left_range, ymin=-1, ymax=1, linestyle=":", color="grey", linewidth=0.8)
    ax.axvline(x=right_range, ymin=-1, ymax=1, linestyle=":", color="grey", linewidth=0.8)
    plt.xticks([left_range, right_range])
    ax.hlines(y=u_thresh, xmin=xlim[0], xmax=xlim[1], colors='tab:blue', linestyles="--", linewidth=0.9)
    ax.hlines(y=l_thresh, xmin=xlim[0], xmax=xlim[1], colors='tab:red', linestyles="--", linewidth=0.9)
    ax.plot(X, g_minus_u_tangent_val, label=r"$f(x_0) +\nabla f(x_0)^T (x-x_0) - U$", color="grey", linestyle="-.", linewidth=0.9)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_linewidth(0.5)
    ax.spines['left'].set_linewidth(0.5)
    ax.tick_params(width=0.5)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5, 1.38), ncol=3, columnspacing=0.8, handlelength=1.7, frameon=False, handletextpad=0.8)
    plt.subplots_adjust(top=0.82, bottom=0.18, left=0.09, right=0.96, hspace=0.08, wspace=0.2)
    fig_file_name = "sine_concave_diff.pdf"
    plt.savefig(fig_file_name)
    plt.close(fig)
    # Restore matplotlib defaults for subsequent plots.
    rcParams.update(rcParamsDefault)
# Minimize this function over x in a specific neighborhood around X0
def func_min_eigenvalue(x, args):
    """scipy.optimize objective: smallest eigenvalue of the Hessian at x.

    `args` is the Hessian callable supplied via minimize(..., args=hess).
    """
    hess_fn = args
    spectrum, _ = np.linalg.eig(hess_fn(x))
    return np.min(spectrum)
# Maximize this function over x in a specific neighborhood around X0
def func_max_eigenvalue(x, args):
    """scipy.optimize objective: *negated* largest Hessian eigenvalue at x,
    so that minimizing it maximizes the eigenvalue."""
    hess_fn = args
    spectrum, _ = np.linalg.eig(hess_fn(x))
    return -1.0 * np.max(spectrum)
def func_min_max_eigenvalues(func_to_convex, X0, domain):
    """Locally optimize the Hessian eigenvalues of func_to_convex.

    Runs two bounded scipy optimizations started at X0: one minimizing the
    smallest eigenvalue, one maximizing the largest.

    Returns:
        (min_eigenvalue, minimizing_point, max_eigenvalue, maximizing_point)
    """
    hess = hessian(func_to_convex)
    sol_min = minimize(func_min_eigenvalue, X0, args=hess, bounds=domain)
    sol_max = minimize(func_max_eigenvalue, X0, args=hess, bounds=domain)
    # func_max_eigenvalue returns the negated eigenvalue; undo the sign.
    return sol_min.fun, sol_min.x, -1.0 * sol_max.fun, sol_max.x
def find_min_and_max_eigenvalues(func_to_convex, domain):
    """Estimate the global min/max Hessian eigenvalues over `domain`.

    Multi-start strategy: launch the local optimization from 10 uniformly
    random points in the domain and keep the overall extremes.
    """
    min_eigenvalue = np.inf
    max_eigenvalue = -np.inf
    # Start the optimization process from multiple points in the domain, then choose the max and min
    starts = np.random.uniform(domain[0][0], domain[0][1], (10, len(domain)))
    for start in starts:
        start = np.array(start, dtype=np.float32)
        lo, _, hi, _ = func_min_max_eigenvalues(func_to_convex, start, domain)
        min_eigenvalue = np.minimum(min_eigenvalue, lo)
        max_eigenvalue = np.maximum(max_eigenvalue, hi)
    assert (min_eigenvalue <= max_eigenvalue)
    print("max_eigenvalue:", max_eigenvalue)
    print("min_eigenvalue:", min_eigenvalue)
    return min_eigenvalue, max_eigenvalue
def admissible_region(X0, func_to_convex, xlim=None, ylim=None):
    """Plot the region where f stays inside [f(X0)-0.2, f(X0)+0.2].

    The shaded interval is bracketed by the roots of f(x) - L on either
    side of X0 (searched within +/- 2 of X0).
    """
    f_at_x0 = func_to_convex(X0)
    l_thresh = f_at_x0 - 0.2
    u_thresh = f_at_x0 + 0.2
    shifted = lambda x: func_to_convex(x) - l_thresh
    root_left = brentq(shifted, X0 - 2, X0)
    root_right = brentq(shifted, X0, X0 + 2)
    plot_admissible_region(X0, xlim, ylim, func_to_convex, l_thresh, u_thresh, root_left, root_right)
def convex_diff(X0, min_eigenvalue, func_to_convex, xlim=None, ylim=None):
    """Build the convex DC decomposition of f around X0 and plot it.

    With lam = |min_eigenvalue|, g(x) = f(x) + lam/2 (x-X0)^2 and
    h(x) = lam/2 (x-X0)^2 are both convex and f = g - h. Computes the
    "safe zone": the x-interval where g < U and the tangent of g minus L
    stays above h, then renders sine_convex_diff.pdf.

    Returns:
        (safe_zone_size, upper_safe_zone_size, lower_safe_zone_size)
    """
    # Thresholds: a band of +/- 0.2 around f(X0).
    l_thresh = func_to_convex(X0) - 0.2
    u_thresh = func_to_convex(X0) + 0.2
    search_roots_distance = 10
    eig = min_eigenvalue
    g_convex = lambda x: func_to_convex(x) + 0.5 * np.abs(eig) * (x - X0) * (x - X0)
    h_convex = lambda x: 0.5 * np.abs(eig) * (x - X0) * (x - X0)
    grad_func_to_convex = grad(func_to_convex)
    g_minus_l_tangent = lambda x: func_to_convex(X0) + grad_func_to_convex(X0) * (x - X0) - l_thresh
    # Condition 1: g(x) < U.
    # Check where g(x) = U and write in title
    func = lambda x: g_convex(x) - u_thresh
    upper_thresh_root_left, upper_thresh_root_right = brentq(func, X0 - search_roots_distance, X0), brentq(func, X0, X0 + search_roots_distance)
    assert upper_thresh_root_left <= upper_thresh_root_right, str(upper_thresh_root_left) + "," + str(upper_thresh_root_right)
    upper_safe_zone_size = upper_thresh_root_right - upper_thresh_root_left
    # Condition 2: Tangent g(x)-L bigger than h(x)
    # Check where Tangent g(x)-L = h(x) and write in figure title
    func = lambda x: g_minus_l_tangent(x) - h_convex(x)
    lower_thresh_root_left, lower_thresh_root_right = brentq(func, X0 - search_roots_distance, X0), brentq(func, X0, X0 + search_roots_distance)
    assert lower_thresh_root_left <= lower_thresh_root_right, str(lower_thresh_root_left) + "," + str(lower_thresh_root_right)
    lower_safe_zone_size = lower_thresh_root_right - lower_thresh_root_left
    if upper_safe_zone_size == 0 or lower_safe_zone_size == 0:
        safe_zone_size = 0
    else:
        # The safe zone is the intersection of the two root intervals.
        safe_zone_size = np.minimum(upper_thresh_root_right, lower_thresh_root_right) - np.maximum(upper_thresh_root_left, lower_thresh_root_left)
    assert safe_zone_size >= 0, str(safe_zone_size)
    plot_2d_convex_diff(X0, xlim, ylim, func_to_convex, l_thresh, u_thresh, g_convex, h_convex, g_minus_l_tangent,
                        np.maximum(lower_thresh_root_left, upper_thresh_root_left),
                        np.minimum(lower_thresh_root_right, upper_thresh_root_right))
    return safe_zone_size, upper_safe_zone_size, lower_safe_zone_size
def concave_diff(X0, max_eigenvalue, func_to_concave, xlim=None, ylim=None):
    """Build the concave DC decomposition of f around X0 and plot it.

    With lam = max_eigenvalue, g(x) = f(x) - lam/2 (x-X0)^2 and
    h(x) = -lam/2 (x-X0)^2 are both concave and f = g - h. Computes the
    "safe zone": the x-interval where g > L and the tangent of g minus U
    stays below h, then renders sine_concave_diff.pdf.

    Returns:
        (safe_zone_size, upper_safe_zone_size, lower_safe_zone_size)
    """
    # Thresholds: a band of +/- 0.2 around f(X0).
    l_thresh = func_to_concave(X0) - 0.2
    u_thresh = func_to_concave(X0) + 0.2
    search_roots_distance = 10
    g_concave = lambda x: func_to_concave(x) - 0.5 * max_eigenvalue * (x - X0) * (x - X0)
    h_concave = lambda x: -0.5 * max_eigenvalue * (x - X0) * (x - X0)
    grad_func_to_convex = grad(func_to_concave)
    g_minus_u_tangent = lambda x: func_to_concave(X0) + grad_func_to_convex(X0) * (x - X0) - u_thresh
    # Condition 1: g(x) > L.
    # Check where g(x) = L and write in title
    func = lambda x: g_concave(x) - l_thresh
    lower_thresh_root_left, lower_thresh_root_right = brentq(func, X0 - search_roots_distance, X0), brentq(func, X0, X0 + search_roots_distance)
    assert lower_thresh_root_left <= lower_thresh_root_right, str(lower_thresh_root_left) + "," + str(lower_thresh_root_right)
    lower_safe_zone_size = lower_thresh_root_right - lower_thresh_root_left
    # Condition 2: Tangent g(x)-U smaller than h(x)
    # Check where Tangent g(x)-U = h(x) and write in figure title
    func = lambda x: g_minus_u_tangent(x) - h_concave(x)
    upper_thresh_root_left, upper_thresh_root_right = brentq(func, X0 - search_roots_distance, X0), brentq(func, X0, X0 + search_roots_distance)
    assert upper_thresh_root_left <= upper_thresh_root_right, str(upper_thresh_root_left) + "," + str(upper_thresh_root_right)
    upper_safe_zone_size = upper_thresh_root_right - upper_thresh_root_left
    if upper_safe_zone_size == 0 or lower_safe_zone_size == 0:
        safe_zone_size = 0
    else:
        # The safe zone is the intersection of the two root intervals.
        safe_zone_size = np.minimum(upper_thresh_root_right, lower_thresh_root_right) - np.maximum(
            upper_thresh_root_left, lower_thresh_root_left)
    assert safe_zone_size >= 0, str(safe_zone_size)
    plot_2d_concave_diff(X0, xlim, ylim, func_to_concave, l_thresh, u_thresh, g_concave, h_concave, g_minus_u_tangent,
                         np.maximum(lower_thresh_root_left, upper_thresh_root_left),
                         np.minimum(lower_thresh_root_right, upper_thresh_root_right))
    return safe_zone_size, upper_safe_zone_size, lower_safe_zone_size
if __name__ == "__main__":
    # Figure 1: the sine example centered at x0 = pi/2.
    center = 0.5 * np.pi
    domain = ((center - 3, center + 3),)
    xlim = [center - 2.1, center + 2.1]
    ylim = [-0.5, 1.5]
    X0 = np.array([center])
    min_eigenvalue, max_eigenvalue = find_min_and_max_eigenvalues(func_sine, domain)
    # Plot the concave and convex diffs
    convex_diff(X0, min_eigenvalue, func_sine, xlim=xlim, ylim=ylim)
    concave_diff(X0, max_eigenvalue, func_sine, xlim=xlim, ylim=ylim)
    admissible_region(X0, func_sine, xlim=xlim, ylim=ylim)
from autograd import hessian
from scipy.optimize import minimize
import matplotlib.pyplot as plt
from matplotlib import rcParams, rcParamsDefault
from scipy.optimize import brentq
from autograd import grad
from experiments.visualization.visualization_utils import get_figsize
def func_sine(X):
    """Elementwise sine; the running example function for all plots below."""
    return np.sin(X)
def plot_admissible_region(X0, xlim, ylim, func_to_convex, l_thresh, u_thresh, left_range, right_range):
    """Plot f(x)=sin(x) with the [L, U] band and the shaded admissible region;
    saves the figure to sine_admissible_region.pdf.

    Args:
        X0: reference point x_0 (1-element array).
        xlim, ylim: axis limits.
        func_to_convex: the plotted function f.
        l_thresh, u_thresh: lower/upper thresholds L and U.
        left_range, right_range: x-interval to shade as admissible.

    NOTE(review): mutates global matplotlib rcParams and restores the
    defaults only on the success path (not exception-safe).
    """
    # Embed fonts as TrueType (type 42) for publication-ready PDFs.
    rcParams['pdf.fonttype'] = 42
    rcParams['ps.fonttype'] = 42
    rcParams.update({'legend.fontsize': 5.4})
    rcParams.update({'font.size': 5.8})
    # textwidth is 506.295, columnwidth is 241.14749
    fig = plt.figure(figsize=get_figsize(columnwidth=506.295, wf=0.32, hf=0.42))
    ax = fig.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    # Make data
    X = np.arange(xlim[0], xlim[1], 0.01)
    Y = func_to_convex(X)
    # Plot the sine in the domain
    ax.plot(X, Y, label="$sin(x)$", color="black", linewidth=0.9)
    ax.plot(X0, func_to_convex(X0), 'o', label="$x_0$", color="black", markersize=3)
    # Shade the admissible x-interval and put ticks at its boundaries.
    ax.axvspan(xmin=left_range, xmax=right_range, edgecolor="none", facecolor="grey", alpha=0.25)
    plt.xticks([left_range, right_range])
    # Horizontal threshold lines U (blue) and L (red).
    ax.hlines(y=u_thresh, xmin=xlim[0], xmax=xlim[1], colors='tab:blue', linestyles="--", label='$U$', linewidth=0.9)
    ax.hlines(y=l_thresh, xmin=xlim[0], xmax=xlim[1], colors='tab:red', linestyles="--", label='$L$', linewidth=0.9)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_linewidth(0.5)
    ax.spines['left'].set_linewidth(0.5)
    ax.tick_params(width=0.5)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5, 1.34), ncol=4, columnspacing=1, handlelength=1.7, frameon=False)
    plt.subplots_adjust(top=0.82, bottom=0.18, left=0.09, right=0.96, hspace=0.08, wspace=0.2)
    fig_file_name = "sine_admissible_region.pdf"
    plt.savefig(fig_file_name)
    plt.close(fig)
    # Restore matplotlib defaults for subsequent plots.
    rcParams.update(rcParamsDefault)
def plot_2d_convex_diff(X0, xlim, ylim, func_to_convex, l_thresh, u_thresh, func_g_convex, func_h_convex, func_g_minus_l_tangent,
                        left_range, right_range):
    """Plot f with its convex DC parts g, h, the tangent-minus-L line, and
    the shaded feasibility regions; saves sine_convex_diff.pdf.

    Args:
        X0: reference point x_0.
        xlim, ylim: axis limits.
        func_to_convex: the original function f.
        l_thresh, u_thresh: thresholds L and U.
        func_g_convex, func_h_convex: convex components with f = g - h.
        func_g_minus_l_tangent: tangent of f at x_0 shifted down by L.
        left_range, right_range: safe-zone boundary positions for the ticks.

    NOTE(review): mutates global rcParams; restored only on the success path.
    """
    # TrueType font embedding for publication PDFs.
    rcParams['pdf.fonttype'] = 42
    rcParams['ps.fonttype'] = 42
    rcParams.update({'legend.fontsize': 5.4})
    rcParams.update({'font.size': 5.8})
    # textwidth is 506.295, columnwidth is 241.14749
    fig = plt.figure(figsize=get_figsize(columnwidth=506.295, wf=0.32, hf=0.42))
    ax = fig.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    # Make data
    X = np.arange(xlim[0], xlim[1], 0.01)
    Y = func_to_convex(X)
    # Plot the sine in the domain
    ax.plot(X, Y, color="black", linewidth=0.9)
    ax.plot(X0, func_to_convex(X0), 'o', color="black", markersize=3)
    g_convex_val = func_g_convex(X)
    h_convex_val = func_h_convex(X)
    ax.plot(X, g_convex_val, label=r"$\breve{g}(x)$", color="tab:orange", linewidth=0.9)
    ax.plot(X, h_convex_val, label=r"$\breve{h}(x)$", color="tab:green", linewidth=0.9)
    g_minus_l_tangent_val = func_g_minus_l_tangent(X)
    # Shade where each safe-zone condition holds: g < U (orange) and
    # h < tangent(g) - L (green).
    ax.fill_between(X, g_convex_val, u_thresh, where=g_convex_val < u_thresh, color='orange', alpha=0.5, interpolate=True)
    ax.fill_between(X, h_convex_val, g_minus_l_tangent_val, where=h_convex_val < g_minus_l_tangent_val, color='green', alpha=0.3, interpolate=True)
    # Dotted verticals mark the resulting safe-zone boundary.
    ax.axvline(x=left_range, ymin=-1, ymax=1, linestyle=":", color="grey", linewidth=0.8)
    ax.axvline(x=right_range, ymin=-1, ymax=1, linestyle=":", color="grey", linewidth=0.8)
    plt.xticks([left_range, right_range])
    ax.hlines(y=u_thresh, xmin=xlim[0], xmax=xlim[1], colors='tab:blue', linestyles="--", linewidth=0.9)
    ax.hlines(y=l_thresh, xmin=xlim[0], xmax=xlim[1], colors='tab:red', linestyles="--", linewidth=0.9)
    ax.plot(X, g_minus_l_tangent_val, label=r"$f(x_0)+\nabla f(x_0)^T (x-x_0) - L$", color="grey", linestyle="-.", linewidth=0.9)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_linewidth(0.5)
    ax.spines['left'].set_linewidth(0.5)
    ax.tick_params(width=0.5)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5, 1.36), ncol=3, columnspacing=0.8, handlelength=1.7, frameon=False, handletextpad=0.8)
    plt.subplots_adjust(top=0.82, bottom=0.18, left=0.09, right=0.96, hspace=0.08, wspace=0.2)
    fig_file_name = "sine_convex_diff.pdf"
    plt.savefig(fig_file_name)
    plt.close(fig)
    # Restore matplotlib defaults for subsequent plots.
    rcParams.update(rcParamsDefault)
def plot_2d_concave_diff(X0, xlim, ylim, func_to_concave, l_thresh, u_thresh, func_g_concave, func_h_concave, func_g_minus_u_tangent,
                         left_range, right_range):
    """Plot f with its concave DC parts g, h, the tangent-minus-U line, and
    the shaded feasibility regions; saves sine_concave_diff.pdf.

    Args:
        X0: reference point x_0.
        xlim, ylim: axis limits.
        func_to_concave: the original function f.
        l_thresh, u_thresh: thresholds L and U.
        func_g_concave, func_h_concave: concave components with f = g - h.
        func_g_minus_u_tangent: tangent of f at x_0 shifted down by U.
        left_range, right_range: safe-zone boundary positions for the ticks.

    BUG FIX: the \\hat legend labels were non-raw strings containing the
    invalid escape sequence ``\\h`` (a DeprecationWarning, a SyntaxWarning
    on 3.12, and slated to become an error); they are now raw strings with
    identical runtime content.
    NOTE(review): mutates global rcParams; restored only on the success path.
    """
    # TrueType font embedding for publication PDFs.
    rcParams['pdf.fonttype'] = 42
    rcParams['ps.fonttype'] = 42
    rcParams.update({'legend.fontsize': 5.4})
    rcParams.update({'font.size': 5.8})
    # textwidth is 506.295, columnwidth is 241.14749
    fig = plt.figure(figsize=get_figsize(columnwidth=506.295, wf=0.32, hf=0.42))
    ax = fig.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    # Make data
    X = np.arange(xlim[0], xlim[1], 0.01)
    Y = func_to_concave(X)
    # Plot the sine in the domain
    ax.plot(X, Y, color="black", linewidth=0.9)
    ax.plot(X0, func_to_concave(X0), 'o', color="black", markersize=3)
    g_concave_val = func_g_concave(X)
    h_concave_val = func_h_concave(X)
    ax.plot(X, g_concave_val, label=r"$\hat{g}(x)$", color="tab:green", linewidth=0.9)
    ax.plot(X, h_concave_val, label=r"$\hat{h}(x)$", color="tab:orange", linewidth=0.9)
    g_minus_u_tangent_val = func_g_minus_u_tangent(X)
    # Shade where each safe-zone condition holds: g > L (green) and
    # h > tangent(g) - U (orange).
    ax.fill_between(X, g_concave_val, l_thresh, where=g_concave_val > l_thresh, color='green', alpha=0.5, interpolate=True)
    ax.fill_between(X, h_concave_val, g_minus_u_tangent_val, where=h_concave_val > g_minus_u_tangent_val, color='orange', alpha=0.3, interpolate=True)
    # Dotted verticals mark the resulting safe-zone boundary.
    ax.axvline(x=left_range, ymin=-1, ymax=1, linestyle=":", color="grey", linewidth=0.8)
    ax.axvline(x=right_range, ymin=-1, ymax=1, linestyle=":", color="grey", linewidth=0.8)
    plt.xticks([left_range, right_range])
    ax.hlines(y=u_thresh, xmin=xlim[0], xmax=xlim[1], colors='tab:blue', linestyles="--", linewidth=0.9)
    ax.hlines(y=l_thresh, xmin=xlim[0], xmax=xlim[1], colors='tab:red', linestyles="--", linewidth=0.9)
    ax.plot(X, g_minus_u_tangent_val, label=r"$f(x_0) +\nabla f(x_0)^T (x-x_0) - U$", color="grey", linestyle="-.", linewidth=0.9)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_linewidth(0.5)
    ax.spines['left'].set_linewidth(0.5)
    ax.tick_params(width=0.5)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5, 1.38), ncol=3, columnspacing=0.8, handlelength=1.7, frameon=False, handletextpad=0.8)
    plt.subplots_adjust(top=0.82, bottom=0.18, left=0.09, right=0.96, hspace=0.08, wspace=0.2)
    fig_file_name = "sine_concave_diff.pdf"
    plt.savefig(fig_file_name)
    plt.close(fig)
    # Restore matplotlib defaults for subsequent plots.
    rcParams.update(rcParamsDefault)
# Minimize this function over x in a specific neighborhood around X0
def func_min_eigenvalue(x, args):
    """scipy.optimize objective: smallest eigenvalue of the Hessian at x.

    `args` is the Hessian callable supplied via minimize(..., args=hess).
    """
    hess_fn = args
    spectrum, _ = np.linalg.eig(hess_fn(x))
    return np.min(spectrum)
# Maximize this function over x in a specific neighborhood around X0
def func_max_eigenvalue(x, args):
    """scipy.optimize objective: *negated* largest Hessian eigenvalue at x,
    so that minimizing it maximizes the eigenvalue."""
    hess_fn = args
    spectrum, _ = np.linalg.eig(hess_fn(x))
    return -1.0 * np.max(spectrum)
def func_min_max_eigenvalues(func_to_convex, X0, domain):
    """Locally optimize the Hessian eigenvalues of func_to_convex.

    Runs two bounded scipy optimizations started at X0: one minimizing the
    smallest eigenvalue, one maximizing the largest.

    Returns:
        (min_eigenvalue, minimizing_point, max_eigenvalue, maximizing_point)
    """
    hess = hessian(func_to_convex)
    sol_min = minimize(func_min_eigenvalue, X0, args=hess, bounds=domain)
    sol_max = minimize(func_max_eigenvalue, X0, args=hess, bounds=domain)
    # func_max_eigenvalue returns the negated eigenvalue; undo the sign.
    return sol_min.fun, sol_min.x, -1.0 * sol_max.fun, sol_max.x
def find_min_and_max_eigenvalues(func_to_convex, domain):
    """Estimate the global min/max Hessian eigenvalues over `domain`.

    Multi-start strategy: launch the local optimization from 10 uniformly
    random points in the domain and keep the overall extremes.
    """
    min_eigenvalue = np.inf
    max_eigenvalue = -np.inf
    # Start the optimization process from multiple points in the domain, then choose the max and min
    starts = np.random.uniform(domain[0][0], domain[0][1], (10, len(domain)))
    for start in starts:
        start = np.array(start, dtype=np.float32)
        lo, _, hi, _ = func_min_max_eigenvalues(func_to_convex, start, domain)
        min_eigenvalue = np.minimum(min_eigenvalue, lo)
        max_eigenvalue = np.maximum(max_eigenvalue, hi)
    assert (min_eigenvalue <= max_eigenvalue)
    print("max_eigenvalue:", max_eigenvalue)
    print("min_eigenvalue:", min_eigenvalue)
    return min_eigenvalue, max_eigenvalue
def admissible_region(X0, func_to_convex, xlim=None, ylim=None):
    """Plot the admissible region of func_to_convex around the point X0.

    Thresholds are f(X0) +/- 0.2; the crossing points of the lower threshold
    are located with Brent's method on each side of X0, then forwarded to the
    plotting helper.
    """
    l_thresh = func_to_convex(X0) - 0.2
    u_thresh = func_to_convex(X0) + 0.2
    shifted = lambda x: func_to_convex(x) - l_thresh
    root_left = brentq(shifted, X0 - 2, X0)
    root_right = brentq(shifted, X0, X0 + 2)
    plot_admissible_region(X0, xlim, ylim, func_to_convex, l_thresh, u_thresh, root_left, root_right)
def convex_diff(X0, min_eigenvalue, func_to_convex, xlim=None, ylim=None):
    """Build a difference-of-convex split f = g - h around X0 and measure it.

    g(x) = f(x) + 0.5*|min_eigenvalue|*(x - X0)^2 and h(x) is that same
    quadratic, so g - h reproduces f.  The "safe zone" is the interval where
    both conditions below hold simultaneously.

    :param X0: reference point (1-element array)
    :param min_eigenvalue: lower bound on the Hessian eigenvalues of f
    :param func_to_convex: the scalar function f being decomposed
    :return: (safe_zone_size, upper_safe_zone_size, lower_safe_zone_size)
    """
    # Thresholds: a fixed +/-0.2 band around f(X0).
    l_thresh = func_to_convex(X0) - 0.2
    u_thresh = func_to_convex(X0) + 0.2
    search_roots_distance = 10
    eig = min_eigenvalue
    g_convex = lambda x: func_to_convex(x) + 0.5 * np.abs(eig) * (x - X0) * (x - X0)
    h_convex = lambda x: 0.5 * np.abs(eig) * (x - X0) * (x - X0)
    grad_func_to_convex = grad(func_to_convex)
    g_minus_l_tangent = lambda x: func_to_convex(X0) + grad_func_to_convex(X0) * (x - X0) - l_thresh
    # Condition 1: g(x) < U.
    # Check where g(x) = U and write in title
    func = lambda x: g_convex(x) - u_thresh
    # Brent's method on each side of X0 (assumes a sign change within
    # search_roots_distance of X0 — brentq raises otherwise).
    upper_thresh_root_left, upper_thresh_root_right = brentq(func, X0 - search_roots_distance, X0), brentq(func, X0, X0 + search_roots_distance)
    assert upper_thresh_root_left <= upper_thresh_root_right, str(upper_thresh_root_left) + "," + str(upper_thresh_root_right)
    upper_safe_zone_size = upper_thresh_root_right - upper_thresh_root_left
    # Condition 2: Tangent g(x)-L bigger than h(x)
    # Check where Tangent g(x)-L = h(x) and write in figure title
    func = lambda x: g_minus_l_tangent(x) - h_convex(x)
    lower_thresh_root_left, lower_thresh_root_right = brentq(func, X0 - search_roots_distance, X0), brentq(func, X0, X0 + search_roots_distance)
    assert lower_thresh_root_left <= lower_thresh_root_right, str(lower_thresh_root_left) + "," + str(lower_thresh_root_right)
    lower_safe_zone_size = lower_thresh_root_right - lower_thresh_root_left
    if upper_safe_zone_size == 0 or lower_safe_zone_size == 0:
        safe_zone_size = 0
    else:
        # Intersection of the two per-condition intervals.
        safe_zone_size = np.minimum(upper_thresh_root_right, lower_thresh_root_right) - np.maximum(upper_thresh_root_left, lower_thresh_root_left)
    assert safe_zone_size >= 0, str(safe_zone_size)
    plot_2d_convex_diff(X0, xlim, ylim, func_to_convex, l_thresh, u_thresh, g_convex, h_convex, g_minus_l_tangent,
                        np.maximum(lower_thresh_root_left, upper_thresh_root_left),
                        np.minimum(lower_thresh_root_right, upper_thresh_root_right))
    return safe_zone_size, upper_safe_zone_size, lower_safe_zone_size
def concave_diff(X0, max_eigenvalue, func_to_concave, xlim=None, ylim=None):
    """Mirror of convex_diff: build a concave split around X0 and measure it.

    g(x) = f(x) - 0.5*max_eigenvalue*(x - X0)^2 and h(x) is that quadratic
    (negated), so g + (-h) reproduces f.

    :param X0: reference point (1-element array)
    :param max_eigenvalue: upper bound on the Hessian eigenvalues of f
    :param func_to_concave: the scalar function f being decomposed
    :return: (safe_zone_size, upper_safe_zone_size, lower_safe_zone_size)
    """
    # Thresholds: a fixed +/-0.2 band around f(X0).
    l_thresh = func_to_concave(X0) - 0.2
    u_thresh = func_to_concave(X0) + 0.2
    search_roots_distance = 10
    g_concave = lambda x: func_to_concave(x) - 0.5 * max_eigenvalue * (x - X0) * (x - X0)
    h_concave = lambda x: -0.5 * max_eigenvalue * (x - X0) * (x - X0)
    grad_func_to_convex = grad(func_to_concave)
    g_minus_u_tangent = lambda x: func_to_concave(X0) + grad_func_to_convex(X0) * (x - X0) - u_thresh
    # Condition 1: g(x) > L.
    # Check where g(x) = L and write in title
    func = lambda x: g_concave(x) - l_thresh
    lower_thresh_root_left, lower_thresh_root_right = brentq(func, X0 - search_roots_distance, X0), brentq(func, X0, X0 + search_roots_distance)
    assert lower_thresh_root_left <= lower_thresh_root_right, str(lower_thresh_root_left) + "," + str(lower_thresh_root_right)
    lower_safe_zone_size = lower_thresh_root_right - lower_thresh_root_left
    # Condition 2: Tangent g(x)-U smaller than h(x)
    # Check where Tangent g(x)-U = h(x) and write in figure title
    func = lambda x: g_minus_u_tangent(x) - h_concave(x)
    upper_thresh_root_left, upper_thresh_root_right = brentq(func, X0 - search_roots_distance, X0), brentq(func, X0, X0 + search_roots_distance)
    assert upper_thresh_root_left <= upper_thresh_root_right, str(upper_thresh_root_left) + "," + str(upper_thresh_root_right)
    upper_safe_zone_size = upper_thresh_root_right - upper_thresh_root_left
    if upper_safe_zone_size == 0 or lower_safe_zone_size == 0:
        safe_zone_size = 0
    else:
        # Intersection of the two per-condition intervals.
        safe_zone_size = np.minimum(upper_thresh_root_right, lower_thresh_root_right) - np.maximum(
            upper_thresh_root_left, lower_thresh_root_left)
    assert safe_zone_size >= 0, str(safe_zone_size)
    plot_2d_concave_diff(X0, xlim, ylim, func_to_concave, l_thresh, u_thresh, g_concave, h_concave, g_minus_u_tangent,
                         np.maximum(lower_thresh_root_left, upper_thresh_root_left),
                         np.minimum(lower_thresh_root_right, upper_thresh_root_right))
    return safe_zone_size, upper_safe_zone_size, lower_safe_zone_size
if __name__ == "__main__":
    # Figure 1
    # Demo: analyze sin(x) around X0 = pi/2 on a symmetric 1-D box.
    X0 = 0.5 * np.pi
    domain = ((X0 - 3, X0 + 3),)
    xlim = [X0 - 2.1, X0 + 2.1]
    ylim = [-0.5, 1.5]
    X0 = np.array([X0])
    min_eigenvalue, max_eigenvalue = find_min_and_max_eigenvalues(func_sine, domain)
    # Plot the concave and convex diffs
    convex_diff(X0, min_eigenvalue, func_sine, xlim=xlim, ylim=ylim)
    concave_diff(X0, max_eigenvalue, func_sine, xlim=xlim, ylim=ylim)
    admissible_region(X0, func_sine, xlim=xlim, ylim=ylim) | 0.768038 | 0.566858
from pydeep.core import Function
import numpy as np
class Loss(Function):
    """Base class for losses: backward() replays the gradient cached by
    the subclass's local_grad()."""

    def backward(self):
        """Return dL/dx as stored in the cache under "local_gx"."""
        return self.cache["local_gx"]
class CrossEntropyLoss(Loss):
    """Softmax + cross-entropy loss over a batch of logits."""

    def forward(self, x, y):
        """
        Descr.: calculate the mean loss of the batch
        :param x: final layer of the network (batch_size, n_dim),
                  no need for softmax as we calculate it here
        :param y: truth values (batch_size, 1), each entry in the
                  array is an integer indicating the class
                  of the data point
        :return: the mean cross entropy loss of the batch
        """
        # Shift logits by their row-wise max before exponentiating so np.exp
        # cannot overflow for large activations; softmax is invariant to a
        # per-row additive shift, so the result is unchanged.
        shifted = x - np.max(x, axis=1, keepdims=True)
        exp_x = np.exp(shifted)
        probs = exp_x / np.sum(exp_x, axis=1, keepdims=True)
        # Vectorized pick of the probability assigned to each true class
        # (replaces the per-row Python loop).
        rows = np.arange(len(probs))
        cols = np.asarray(y).reshape(-1)
        logprobs = -np.log(probs[rows, cols])
        crossentropy_loss = np.mean(logprobs)
        self.cache["probs"] = probs
        self.cache['y'] = y
        return crossentropy_loss

    def local_grad(self, x, y):
        """
        Descr.: cache the local gradient for backprop.
        :param x: final layer of the network (batch_size, n_dim),
                  no need for softmax as we calculate it here
        :param y: truth values (batch_size, 1), each entry in the
                  array is an integer indicating the class
                  of the data point
        """
        probs = self.cache["probs"]
        # One-hot encode the targets, then dL/dx = (softmax - onehot) / batch.
        truth = np.zeros_like(probs)
        truth[np.arange(len(probs)), np.asarray(y).reshape(-1)] = 1.0
        self.cache["local_gx"] = (probs - truth) / float(len(x))
class MSELoss(Loss):
    """Mean squared error loss."""
    def forward(self, x, y):
        """
        Descr.: calculate mean loss of the batch, the avg. loss of the current batch
        :param x: predicted values, (batch_size, 1)
        :param y: truth values, (batch_sze, 1)
        :return: scalar value, the mse of the batch
        """
        loss = ((x - y)**2).mean()
        return loss
    def local_grad(self, x, y):
        """
        Descr.: calulate dL/dx, and cache it for backprop.
        :param x: predicted values, (batch_size, 1)
        :param y: truth values, (batch_sze, 1)
        """
        # d/dx mean((x-y)^2) = 2(x-y)/batch_size; stored for backward().
        self.cache["local_gx"] = 2 * (x - y) / x.shape[0] | pydeep/loss.py | from pydeep.core import Function
import numpy as np
class Loss(Function):
    """Base class for losses: backward() replays the gradient cached by
    the subclass's local_grad()."""

    def backward(self):
        """Return dL/dx as stored in the cache under "local_gx"."""
        return self.cache["local_gx"]
class CrossEntropyLoss(Loss):
    """Softmax + cross-entropy loss over a batch of logits."""

    def forward(self, x, y):
        """
        Descr.: calculate the mean loss of the batch
        :param x: final layer of the network (batch_size, n_dim),
                  no need for softmax as we calculate it here
        :param y: truth values (batch_size, 1), each entry in the
                  array is an integer indicating the class
                  of the data point
        :return: the mean cross entropy loss of the batch
        """
        # Shift logits by their row-wise max before exponentiating so np.exp
        # cannot overflow for large activations; softmax is invariant to a
        # per-row additive shift, so the result is unchanged.
        shifted = x - np.max(x, axis=1, keepdims=True)
        exp_x = np.exp(shifted)
        probs = exp_x / np.sum(exp_x, axis=1, keepdims=True)
        # Vectorized pick of the probability assigned to each true class
        # (replaces the per-row Python loop).
        rows = np.arange(len(probs))
        cols = np.asarray(y).reshape(-1)
        logprobs = -np.log(probs[rows, cols])
        crossentropy_loss = np.mean(logprobs)
        self.cache["probs"] = probs
        self.cache['y'] = y
        return crossentropy_loss

    def local_grad(self, x, y):
        """
        Descr.: cache the local gradient for backprop.
        :param x: final layer of the network (batch_size, n_dim),
                  no need for softmax as we calculate it here
        :param y: truth values (batch_size, 1), each entry in the
                  array is an integer indicating the class
                  of the data point
        """
        probs = self.cache["probs"]
        # One-hot encode the targets, then dL/dx = (softmax - onehot) / batch.
        truth = np.zeros_like(probs)
        truth[np.arange(len(probs)), np.asarray(y).reshape(-1)] = 1.0
        self.cache["local_gx"] = (probs - truth) / float(len(x))
class MSELoss(Loss):
    """Mean squared error loss."""

    def forward(self, x, y):
        """
        Descr.: calculate the mean squared error of the batch
        :param x: predicted values, (batch_size, 1)
        :param y: truth values, (batch_size, 1)
        :return: scalar value, the mse of the batch
        """
        residual = x - y
        return (residual ** 2).mean()

    def local_grad(self, x, y):
        """
        Descr.: calculate dL/dx and cache it for backprop.
        :param x: predicted values, (batch_size, 1)
        :param y: truth values, (batch_size, 1)
        """
        residual = x - y
        self.cache["local_gx"] = 2 * residual / x.shape[0]
import hashlib
import os
import pytest
from pex import dist_metadata, resolver, targets
from pex.fetcher import URLFetcher
from pex.pip.tool import PackageIndexConfiguration
from pex.resolve.configured_resolver import ConfiguredResolver
from pex.resolve.locked_resolve import LockConfiguration, LockedResolve, LockStyle
from pex.resolve.lockfile.operations import LockObserver
from pex.resolve.resolved_requirement import Pin
from pex.resolve.resolver_configuration import PipConfiguration
from pex.resolve.testing import normalize_locked_resolve
from pex.resolver import Downloaded, LocalDistribution, WheelBuilder
from pex.typing import TYPE_CHECKING
from pex.util import CacheHelper
if TYPE_CHECKING:
from typing import Any, Dict, Iterable, Tuple
def normalize(
    locked_resolves,  # type: Tuple[LockedResolve, ...]
    skip_additional_artifacts=False,  # type: bool
    skip_urls=False,  # type: bool
    skip_verified=False,  # type: bool
):
    # type: (...) -> Tuple[LockedResolve, ...]
    """Normalize every LockedResolve for order-insensitive comparison."""
    normalized = []
    for lock in locked_resolves:
        normalized.append(
            normalize_locked_resolve(
                lock,
                skip_additional_artifacts=skip_additional_artifacts,
                skip_urls=skip_urls,
                skip_verified=skip_verified,
            )
        )
    return tuple(normalized)
def create_lock_observer(lock_configuration):
    # type: (LockConfiguration) -> LockObserver
    """Build a LockObserver wired with a default Pip configuration."""
    pip_configuration = PipConfiguration()
    return LockObserver(
        lock_configuration=lock_configuration,
        resolver=ConfiguredResolver(pip_configuration=pip_configuration),
        # The wheel builder mirrors the same Pip configuration so wheels built
        # while locking match what a normal resolve would produce.
        wheel_builder=WheelBuilder(
            package_index_configuration=PackageIndexConfiguration.create(
                resolver_version=pip_configuration.resolver_version,
                indexes=pip_configuration.repos_configuration.indexes,
                find_links=pip_configuration.repos_configuration.find_links,
                network_configuration=pip_configuration.network_configuration,
            ),
            prefer_older_binary=pip_configuration.prefer_older_binary,
            use_pep517=pip_configuration.use_pep517,
            build_isolation=pip_configuration.build_isolation,
        ),
        url_fetcher=URLFetcher(network_configuration=pip_configuration.network_configuration),
    )
def create_lock(
    lock_configuration,  # type: LockConfiguration
    **kwargs  # type: Any
):
    # type: (...) -> Tuple[Downloaded, Tuple[LockedResolve, ...]]
    """Download per **kwargs while observing the resolve into a lock."""
    observer = create_lock_observer(lock_configuration)
    download_result = resolver.download(observer=observer, **kwargs)
    locked = observer.lock(download_result)
    return download_result, locked
@pytest.mark.parametrize(
    "requirements",
    (
        pytest.param(["ansicolors==1.1.8"], id="pinned-no-transitive-deps"),
        pytest.param(["isort==4.3.21"], id="pinned-transitive-deps"),
        pytest.param(["ansicolors"], id="float-no-transitive-deps"),
        pytest.param(["isort"], id="float-transitive-deps"),
    ),
)
@pytest.mark.parametrize(
    "lock_configuration",
    (
        pytest.param(LockConfiguration(style=LockStyle.STRICT), id="strict"),
        pytest.param(LockConfiguration(style=LockStyle.SOURCES), id="sources"),
    ),
)
def test_lock_single_target(
    tmpdir,  # type: Any
    requirements,  # type: Iterable[str]
    lock_configuration,  # type: LockConfiguration
):
    # type: (...) -> None
    """Lock a resolve for the current target and verify the lock's integrity.

    Checks, in order: locked pins and fingerprints match the actual
    downloads; a find-links lock built from those artifacts matches the PyPI
    lock modulo urls/verification; and a lock exported to a requirements file
    reproduces the original resolve.
    """
    downloaded, locked_resolves = create_lock(lock_configuration, requirements=requirements)
    assert 1 == len(locked_resolves)
    lock = locked_resolves[0]
    assert targets.current().platform.tag == lock.platform_tag
    def pin(local_distribution):
        # type: (LocalDistribution) -> Pin
        project_name_and_version = dist_metadata.project_name_and_version(local_distribution.path)
        assert project_name_and_version is not None
        return Pin.canonicalize(project_name_and_version)
    local_distributions_by_pin = {
        pin(local_dist): local_dist for local_dist in downloaded.local_distributions
    } # type: Dict[Pin, LocalDistribution]
    assert sorted(local_distributions_by_pin) == sorted(
        locked_req.pin for locked_req in lock.locked_requirements
    ), (
        "Expected the actual set of downloaded distributions to match the set of pinned "
        "requirements in the lock."
    )
    for locked_req in lock.locked_requirements:
        fingerprint = locked_req.artifact.fingerprint
        assert fingerprint.hash == CacheHelper.hash(
            path=local_distributions_by_pin[locked_req.pin].path,
            hasher=lambda: hashlib.new(fingerprint.algorithm),
        ), (
            "Expected the fingerprint of the downloaded distribution to match the fingerprint "
            "recorded in the lock."
        )
    # Re-lock against a local find-links repo built by symlinking the exact
    # artifacts just downloaded.
    find_links_repo = os.path.join(str(tmpdir), "find-links")
    os.mkdir(find_links_repo)
    for local_dist in downloaded.local_distributions:
        os.symlink(
            local_dist.path, os.path.join(find_links_repo, os.path.basename(local_dist.path))
        )
    _, find_links_locked_resolves = create_lock(
        lock_configuration,
        requirements=requirements,
        indexes=[],
        find_links=[find_links_repo],
    )
    assert normalize(
        locked_resolves, skip_additional_artifacts=True, skip_urls=True, skip_verified=True
    ) == normalize(
        find_links_locked_resolves,
        skip_additional_artifacts=True,
        skip_urls=True,
        skip_verified=True,
    ), (
        "Expected a find-links lock to match an equivalent PyPI lock except for the primary "
        "artifact urls and their verification status and lack of additional artifacts (since these "
        "are never downloaded; but instead, just recorded)."
    )
    # Round-trip: exporting the lock as a requirements file must reproduce it.
    lock_file = os.path.join(str(tmpdir), "requirements.txt")
    with open(lock_file, "w") as fp:
        lock.emit_requirements(fp)
    _, export_locked_resolves = create_lock(lock_configuration, requirement_files=[lock_file])
    assert normalize(locked_resolves) == normalize(export_locked_resolves), (
        "Expected the download used to create a lock to be reproduced by a download using the "
        "requirements generated from the lock."
    ) | tests/integration/test_locked_resolve.py |
import hashlib
import os
import pytest
from pex import dist_metadata, resolver, targets
from pex.fetcher import URLFetcher
from pex.pip.tool import PackageIndexConfiguration
from pex.resolve.configured_resolver import ConfiguredResolver
from pex.resolve.locked_resolve import LockConfiguration, LockedResolve, LockStyle
from pex.resolve.lockfile.operations import LockObserver
from pex.resolve.resolved_requirement import Pin
from pex.resolve.resolver_configuration import PipConfiguration
from pex.resolve.testing import normalize_locked_resolve
from pex.resolver import Downloaded, LocalDistribution, WheelBuilder
from pex.typing import TYPE_CHECKING
from pex.util import CacheHelper
if TYPE_CHECKING:
from typing import Any, Dict, Iterable, Tuple
def normalize(
    locked_resolves,  # type: Tuple[LockedResolve, ...]
    skip_additional_artifacts=False,  # type: bool
    skip_urls=False,  # type: bool
    skip_verified=False,  # type: bool
):
    # type: (...) -> Tuple[LockedResolve, ...]
    """Normalize every LockedResolve for order-insensitive comparison."""
    normalized = []
    for lock in locked_resolves:
        normalized.append(
            normalize_locked_resolve(
                lock,
                skip_additional_artifacts=skip_additional_artifacts,
                skip_urls=skip_urls,
                skip_verified=skip_verified,
            )
        )
    return tuple(normalized)
def create_lock_observer(lock_configuration):
    # type: (LockConfiguration) -> LockObserver
    """Build a LockObserver wired with a default Pip configuration."""
    pip_configuration = PipConfiguration()
    return LockObserver(
        lock_configuration=lock_configuration,
        resolver=ConfiguredResolver(pip_configuration=pip_configuration),
        # The wheel builder mirrors the same Pip configuration so wheels built
        # while locking match what a normal resolve would produce.
        wheel_builder=WheelBuilder(
            package_index_configuration=PackageIndexConfiguration.create(
                resolver_version=pip_configuration.resolver_version,
                indexes=pip_configuration.repos_configuration.indexes,
                find_links=pip_configuration.repos_configuration.find_links,
                network_configuration=pip_configuration.network_configuration,
            ),
            prefer_older_binary=pip_configuration.prefer_older_binary,
            use_pep517=pip_configuration.use_pep517,
            build_isolation=pip_configuration.build_isolation,
        ),
        url_fetcher=URLFetcher(network_configuration=pip_configuration.network_configuration),
    )
def create_lock(
    lock_configuration,  # type: LockConfiguration
    **kwargs  # type: Any
):
    # type: (...) -> Tuple[Downloaded, Tuple[LockedResolve, ...]]
    """Download per **kwargs while observing the resolve into a lock."""
    observer = create_lock_observer(lock_configuration)
    download_result = resolver.download(observer=observer, **kwargs)
    locked = observer.lock(download_result)
    return download_result, locked
@pytest.mark.parametrize(
    "requirements",
    (
        pytest.param(["ansicolors==1.1.8"], id="pinned-no-transitive-deps"),
        pytest.param(["isort==4.3.21"], id="pinned-transitive-deps"),
        pytest.param(["ansicolors"], id="float-no-transitive-deps"),
        pytest.param(["isort"], id="float-transitive-deps"),
    ),
)
@pytest.mark.parametrize(
    "lock_configuration",
    (
        pytest.param(LockConfiguration(style=LockStyle.STRICT), id="strict"),
        pytest.param(LockConfiguration(style=LockStyle.SOURCES), id="sources"),
    ),
)
def test_lock_single_target(
    tmpdir,  # type: Any
    requirements,  # type: Iterable[str]
    lock_configuration,  # type: LockConfiguration
):
    # type: (...) -> None
    """Lock a resolve for the current target and verify the lock's integrity.

    Checks, in order: locked pins and fingerprints match the actual
    downloads; a find-links lock built from those artifacts matches the PyPI
    lock modulo urls/verification; and a lock exported to a requirements file
    reproduces the original resolve.
    """
    downloaded, locked_resolves = create_lock(lock_configuration, requirements=requirements)
    assert 1 == len(locked_resolves)
    lock = locked_resolves[0]
    assert targets.current().platform.tag == lock.platform_tag
    def pin(local_distribution):
        # type: (LocalDistribution) -> Pin
        project_name_and_version = dist_metadata.project_name_and_version(local_distribution.path)
        assert project_name_and_version is not None
        return Pin.canonicalize(project_name_and_version)
    local_distributions_by_pin = {
        pin(local_dist): local_dist for local_dist in downloaded.local_distributions
    } # type: Dict[Pin, LocalDistribution]
    assert sorted(local_distributions_by_pin) == sorted(
        locked_req.pin for locked_req in lock.locked_requirements
    ), (
        "Expected the actual set of downloaded distributions to match the set of pinned "
        "requirements in the lock."
    )
    for locked_req in lock.locked_requirements:
        fingerprint = locked_req.artifact.fingerprint
        assert fingerprint.hash == CacheHelper.hash(
            path=local_distributions_by_pin[locked_req.pin].path,
            hasher=lambda: hashlib.new(fingerprint.algorithm),
        ), (
            "Expected the fingerprint of the downloaded distribution to match the fingerprint "
            "recorded in the lock."
        )
    # Re-lock against a local find-links repo built by symlinking the exact
    # artifacts just downloaded.
    find_links_repo = os.path.join(str(tmpdir), "find-links")
    os.mkdir(find_links_repo)
    for local_dist in downloaded.local_distributions:
        os.symlink(
            local_dist.path, os.path.join(find_links_repo, os.path.basename(local_dist.path))
        )
    _, find_links_locked_resolves = create_lock(
        lock_configuration,
        requirements=requirements,
        indexes=[],
        find_links=[find_links_repo],
    )
    assert normalize(
        locked_resolves, skip_additional_artifacts=True, skip_urls=True, skip_verified=True
    ) == normalize(
        find_links_locked_resolves,
        skip_additional_artifacts=True,
        skip_urls=True,
        skip_verified=True,
    ), (
        "Expected a find-links lock to match an equivalent PyPI lock except for the primary "
        "artifact urls and their verification status and lack of additional artifacts (since these "
        "are never downloaded; but instead, just recorded)."
    )
    # Round-trip: exporting the lock as a requirements file must reproduce it.
    lock_file = os.path.join(str(tmpdir), "requirements.txt")
    with open(lock_file, "w") as fp:
        lock.emit_requirements(fp)
    _, export_locked_resolves = create_lock(lock_configuration, requirement_files=[lock_file])
    assert normalize(locked_resolves) == normalize(export_locked_resolves), (
        "Expected the download used to create a lock to be reproduced by a download using the "
        "requirements generated from the lock."
    ) | 0.767516 | 0.164382
from lxml import etree
import os.path
import os
def rename(filepath, srcext, destext):
    """Move filepath+srcext into place as filepath+destext.

    On POSIX a plain os.rename overwrites the destination atomically.  On
    Windows os.rename refuses to overwrite, so an existing destination is
    first parked as filepath+'.bak' and removed once the new file is in
    place.
    """
    source = filepath + srcext
    destination = filepath + destext
    if os.name == 'nt' and os.path.exists(destination):
        backup = filepath + '.bak'
        # Clear out any stale backup left by an earlier interrupted swap.
        if os.path.exists(backup):
            os.unlink(backup)
        os.rename(destination, backup)
        os.rename(source, destination)
        os.unlink(backup)
    else:
        os.rename(source, destination)
class XmlStore(object):
    """Disk-backed store of XML records sharded across numbered files.

    NOTE(review): Python 2 code (`unicode`, `dict.iteritems`,
    `list.sort()` on `keys()`); keep py2 semantics when editing.
    """
    class CachedTree(object):
        """A cached XML tree, with some properties.
        """
        __slots__ = ("tree", "modified")
        def __init__(self, tree, modified=False):
            self.tree = tree
            self.modified = modified
        def __len__(self):
            return len(self.tree.getroot())
        def write(self, filepath):
            """Write the cached tree to a given filepath.
            The ".xml" extension is added to the end of the filepath.
            """
            if not os.path.exists(os.path.dirname(filepath)):
                os.makedirs(os.path.dirname(filepath))
            fd = open(filepath + ".tmp", "wb")
            try:
                self.tree.write(fd, encoding="UTF-8", xml_declaration=True)
            finally:
                fd.close()
            # Atomic replace of the old file with the new one.
            rename(filepath, ".tmp", ".xml")
            self.modified = False
        def set(self, record):
            """Set a record.
            The record must be an Element, with the "id" property set to the id
            for the record.
            """
            self.modified = True
            record_id = record.get(u'id')
            assert record_id is not None
            # Replace-by-id: drop any existing element with this id first.
            old = self.tree.xpath('/*/*[@id=$id]', id=record_id)
            if len(old) != 0:
                old[0].getparent().remove(old[0])
            self.tree.getroot().append(record)
        def remove(self, id):
            """Remove the record with the given id.
            Raises KeyError if the record isn't found.
            """
            self.modified = True
            old = self.tree.xpath('/*/*[@id=$id]', id=id)
            if len(old) == 0:
                raise KeyError("Key with id %r not found" % id)
            assert len(old) == 1
            old[0].getparent().remove(old[0])
        def get(self, id):
            """Get the record with the given id.
            Raises KeyError if the record isn't found.
            """
            old = self.tree.xpath('/*/*[@id=$id]', id=id)
            if len(old) == 0:
                raise KeyError("Key with id %r not found" % id)
            assert len(old) == 1
            return old[0]
    class Cache(object):
        """A cache of parsed XML files.
        """
        def __init__(self):
            self.cache = {}
        def __del__(self):
            """Flush any modified trees on delete.
            Don't write code relying on this method; it's here to try and
            prevent dataloss, but you should always call flush() (or clear())
            explicitly before exiting.
            """
            self.flush()
        def flush(self):
            """Write any modified trees in the cache to disk.
            """
            for filepath, tree in self.cache.iteritems():
                if tree.modified:
                    tree.write(filepath)
        def clear(self):
            """Clear the cache, flushing any modified trees.
            """
            self.flush()
            self.cache = {}
        def get(self, filepath):
            """Get a tree from the cache.
            Returns a CachedTree, creating an empty one if the file didn't
            exist.
            """
            tree = self.cache.get(filepath, None)
            if tree is not None:
                return tree
            # Clear the cache - for now, we only keep one parsed file in it,
            # but this could easily be modified in future.
            self.clear()
            if os.path.exists(filepath + '.xml'):
                fd = open(filepath + '.xml', "rb")
                try:
                    tree = XmlStore.CachedTree(etree.parse(fd))
                finally:
                    fd.close()
            elif os.path.exists(filepath + '.bak'):
                # This can happen on windows if we crashed after renaming the
                # old file away, but before writing the new one.
                fd = open(filepath + '.bak', "rb")
                try:
                    tree = XmlStore.CachedTree(etree.parse(fd))
                finally:
                    fd.close()
            else:
                # Make an empty container tree.
                # Don't mark it as modified, because if no entries get put into
                # it, we don't want to write it back.
                xmltree = etree.ElementTree(etree.Element("container"))
                tree = XmlStore.CachedTree(xmltree)
            self.cache[filepath] = tree
            return tree
    def __init__(self, topdir, items_per_file=100):
        """Make a new XML store, in the directory given by topdir.
        The XML store stores the records it is given in a set of XML files. It
        puts at most `items_per_file` items in each file.
        """
        self.topdir = topdir
        # Dictionary mapping from id to file number.
        self.idmap = {}
        self.idmap_modified = False
        # Meta info dictionary.
        self.meta = {}
        # Next file number to allocate.
        self.next_num = 0
        # Number of files to put in each directory.
        self.files_per_dir = 10
        # Number of items to put in each file.
        self.items_per_file = items_per_file
        # Cache of parsed files.
        self.tree_cache = XmlStore.Cache()
        # Read the idmap from idmap.xml
        self.idmap_path = os.path.join(self.topdir, 'idmap')
        fd = None
        # Fall back to the .bak copy if a crash interrupted a previous swap.
        if os.path.exists(self.idmap_path + ".xml"):
            fd = open(self.idmap_path + ".xml", "rb")
        elif os.path.exists(self.idmap_path + ".bak"):
            fd = open(self.idmap_path + ".bak", "rb")
        if fd is not None:
            try:
                idmap_xml = etree.parse(fd)
            finally:
                fd.close()
            for elt in idmap_xml.xpath(u'/*/id'):
                num = int(elt.text)
                self.idmap[elt.get(u'id')] = num
                # next_num tracks the highest file number seen, so new
                # records keep filling the newest file.
                if num > self.next_num:
                    self.next_num = num
            for elt in idmap_xml.xpath(u'/*/meta'):
                self.meta[elt.get(u'name')] = elt.text
    def __del__(self):
        """Flush any modified trees on delete.
        Don't write code relying on this method; it's here to try and
        prevent dataloss, but you should always call flush() or close()
        explicitly before exiting.
        """
        self.flush()
    def __len__(self):
        return len(self.idmap)
    def __iter__(self):
        keys = self.idmap.keys()
        keys.sort()
        for key in keys:
            yield key
    def close(self):
        """Cleanup, flushing all changes to disk, and dropping any cache items.
        """
        self.flush()
        self.tree_cache.clear()
        self.idmap = None
    def flush(self):
        """Flush all changes to disk.
        """
        if self.idmap_modified:
            map = etree.Element("idmap")
            for id in sorted(self.idmap.keys()):
                elt = etree.SubElement(map, "id")
                elt.set('id', id)
                elt.text = unicode(self.idmap[id])
            for name in sorted(self.meta.keys()):
                elt = etree.SubElement(map, "meta")
                elt.set('name', name)
                elt.text = unicode(self.meta[name])
            map = etree.ElementTree(map)
            if not os.path.exists(os.path.dirname(self.idmap_path)):
                os.makedirs(os.path.dirname(self.idmap_path))
            fd = open(self.idmap_path + ".tmp", "wb")
            try:
                map.write(fd, encoding="UTF-8", xml_declaration=True)
            finally:
                fd.close()
            # Atomic replace of the old file with the new one.
            rename(self.idmap_path, ".tmp", ".xml")
            self.idmap_modified = False
        self.tree_cache.flush()
    def _num_to_path(self, filenum):
        """Convert a file number to the path for that file.
        Returns the path without the extension.
        """
        # Decompose filenum into base-files_per_dir digits: leading digits
        # become D<n> directories, the last one the F<n> file name.
        components = []
        while filenum >= self.files_per_dir:
            components.append(filenum % self.files_per_dir)
            filenum = filenum // self.files_per_dir
        components.append(filenum)
        components.reverse()
        result = []
        for c in components[:-1]:
            result.append("D%d" % c)
        result.append("F%d" % components[-1])
        return os.path.join(self.topdir, *result)
    def _get_tree(self, filenum):
        """Get the tree for a given filenum.
        """
        return self.tree_cache.get(self._num_to_path(filenum))
    def set(self, record):
        """Set a record.
        `record` should be an lxml Element object, or a unicode string
        containing raw XML. The record must have an "id" attribute containing
        the ID to use.
        If a record with the same id already exists, it is replaced.
        """
        assert not isinstance(record, str)
        if isinstance(record, unicode):
            record = etree.fromstring(record)
        id = record.get(u'id')
        assert id is not None
        filenum = self.idmap.get(id, None)
        if filenum is None:
            # New id: append to the newest file, rolling over to a fresh
            # file when the current one is full.
            filenum = self.next_num
            tree = self._get_tree(filenum)
            if len(tree) >= self.items_per_file:
                self.next_num += 1
                filenum = self.next_num
                tree = self._get_tree(filenum)
            self.idmap[id] = filenum
            self.idmap_modified = True
        else:
            tree = self._get_tree(filenum)
        tree.set(record)
    def remove(self, id):
        """Remove the record with a given id.
        Raises KeyError if the record isn't found.
        """
        filenum = self.idmap[id]
        tree = self._get_tree(filenum)
        tree.remove(id)
        del self.idmap[id]
        self.idmap_modified = True
    def get(self, id):
        """Get the record with a given id.
        Raises KeyError if the record isn't found.
        """
        filenum = self.idmap[id]
        tree = self._get_tree(filenum)
        return tree.get(id)
    def get_meta(self, name):
        """Get a metadata value.
        Raises KeyError if the value isn't found.
        """
        assert isinstance(name, unicode)
        return self.meta[name]
    def set_meta(self, name, value):
        """Set a metadata value.
        """
        assert isinstance(name, unicode)
        assert isinstance(value, unicode)
        self.meta[name] = value
        self.idmap_modified = True
    def del_meta(self, name):
        """Delete a metadata value.
        """
        assert isinstance(name, unicode)
        del self.meta[name]
        self.idmap_modified = True | web/xmlstore.py | from lxml import etree
import os.path
import os
def rename(filepath, srcext, destext):
    """Move filepath+srcext into place as filepath+destext.

    On POSIX a plain os.rename overwrites the destination atomically.  On
    Windows os.rename refuses to overwrite, so an existing destination is
    first parked as filepath+'.bak' and removed once the new file is in
    place.
    """
    source = filepath + srcext
    destination = filepath + destext
    if os.name == 'nt' and os.path.exists(destination):
        backup = filepath + '.bak'
        # Clear out any stale backup left by an earlier interrupted swap.
        if os.path.exists(backup):
            os.unlink(backup)
        os.rename(destination, backup)
        os.rename(source, destination)
        os.unlink(backup)
    else:
        os.rename(source, destination)
class XmlStore(object):
class CachedTree(object):
"""A cached XML tree, with some properties.
"""
__slots__ = ("tree", "modified")
def __init__(self, tree, modified=False):
self.tree = tree
self.modified = modified
def __len__(self):
return len(self.tree.getroot())
def write(self, filepath):
"""Write the cached tree to a given filepath.
The ".xml" extension is added to the end of the filepath.
"""
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
fd = open(filepath + ".tmp", "wb")
try:
self.tree.write(fd, encoding="UTF-8", xml_declaration=True)
finally:
fd.close()
# Atomic replace of the old file with the new one.
rename(filepath, ".tmp", ".xml")
self.modified = False
def set(self, record):
"""Set a record.
The record must be an Element, with the "id" property set to the id
for the record.
"""
self.modified = True
record_id = record.get(u'id')
assert record_id is not None
old = self.tree.xpath('/*/*[@id=$id]', id=record_id)
if len(old) != 0:
old[0].getparent().remove(old[0])
self.tree.getroot().append(record)
def remove(self, id):
"""Remove the record with the given id.
Raises KeyError if the record isn't found.
"""
self.modified = True
old = self.tree.xpath('/*/*[@id=$id]', id=id)
if len(old) == 0:
raise KeyError("Key with id %r not found" % id)
assert len(old) == 1
old[0].getparent().remove(old[0])
def get(self, id):
"""Get the record with the given id.
Raises KeyError if the record isn't found.
"""
old = self.tree.xpath('/*/*[@id=$id]', id=id)
if len(old) == 0:
raise KeyError("Key with id %r not found" % id)
assert len(old) == 1
return old[0]
class Cache(object):
"""A cache of parsed XML files.
"""
def __init__(self):
self.cache = {}
def __del__(self):
"""Flush any modified trees on delete.
Don't write code relying on this method; it's here to try and
prevent dataloss, but you should always call flush() (or clear())
explicitly before exiting.
"""
self.flush()
def flush(self):
"""Write any modified trees in the cache to disk.
"""
for filepath, tree in self.cache.iteritems():
if tree.modified:
tree.write(filepath)
def clear(self):
"""Clear the cache, flushing any modified trees.
"""
self.flush()
self.cache = {}
def get(self, filepath):
"""Get a tree from the cache.
Returns a CachedTree, creating an empty one if the file didn't
exist.
"""
tree = self.cache.get(filepath, None)
if tree is not None:
return tree
# Clear the cache - for now, we only keep one parsed file in it,
# but this could easily be modified in future.
self.clear()
if os.path.exists(filepath + '.xml'):
fd = open(filepath + '.xml', "rb")
try:
tree = XmlStore.CachedTree(etree.parse(fd))
finally:
fd.close()
elif os.path.exists(filepath + '.bak'):
# This can happen on windows if we crashed after renaming the
# old file away, but before writing the new one.
fd = open(filepath + '.bak', "rb")
try:
tree = XmlStore.CachedTree(etree.parse(fd))
finally:
fd.close()
else:
# Make an empty container tree.
# Don't mark it as modified, because if no entries get put into
# it, we don't want to write it back.
xmltree = etree.ElementTree(etree.Element("container"))
tree = XmlStore.CachedTree(xmltree)
self.cache[filepath] = tree
return tree
def __init__(self, topdir, items_per_file=100):
"""Make a new XML store, in the directory given by topdir.
The XML store stores the records it is given in a set of XML files. It
puts at most `items_per_file` items in each file.
"""
self.topdir = topdir
# Dictionary mapping from id to file number.
self.idmap = {}
self.idmap_modified = False
# Meta info dictionary.
self.meta = {}
# Next file number to allocate.
self.next_num = 0
# Number of files to put in each directory.
self.files_per_dir = 10
# Number of items to put in each file.
self.items_per_file = items_per_file
# Cache of parsed files.
self.tree_cache = XmlStore.Cache()
# Read the idmap from idmap.xml
self.idmap_path = os.path.join(self.topdir, 'idmap')
fd = None
if os.path.exists(self.idmap_path + ".xml"):
fd = open(self.idmap_path + ".xml", "rb")
elif os.path.exists(self.idmap_path + ".bak"):
fd = open(self.idmap_path + ".bak", "rb")
if fd is not None:
try:
idmap_xml = etree.parse(fd)
finally:
fd.close()
for elt in idmap_xml.xpath(u'/*/id'):
num = int(elt.text)
self.idmap[elt.get(u'id')] = num
if num > self.next_num:
self.next_num = num
for elt in idmap_xml.xpath(u'/*/meta'):
self.meta[elt.get(u'name')] = elt.text
def __del__(self):
"""Flush any modified trees on delete.
Don't write code relying on this method; it's here to try and
prevent dataloss, but you should always call flush() or close()
explicitly before exiting.
"""
self.flush()
def __len__(self):
return len(self.idmap)
def __iter__(self):
keys = self.idmap.keys()
keys.sort()
for key in keys:
yield key
def close(self):
"""Cleanup, flushing all changes to disk, and dropping any cache items.
"""
self.flush()
self.tree_cache.clear()
self.idmap = None
def flush(self):
"""Flush all changes to disk.
"""
if self.idmap_modified:
map = etree.Element("idmap")
for id in sorted(self.idmap.keys()):
elt = etree.SubElement(map, "id")
elt.set('id', id)
elt.text = unicode(self.idmap[id])
for name in sorted(self.meta.keys()):
elt = etree.SubElement(map, "meta")
elt.set('name', name)
elt.text = unicode(self.meta[name])
map = etree.ElementTree(map)
if not os.path.exists(os.path.dirname(self.idmap_path)):
os.makedirs(os.path.dirname(self.idmap_path))
fd = open(self.idmap_path + ".tmp", "wb")
try:
map.write(fd, encoding="UTF-8", xml_declaration=True)
finally:
fd.close()
# Atomic replace of the old file with the new one.
rename(self.idmap_path, ".tmp", ".xml")
self.idmap_modified = False
self.tree_cache.flush()
def _num_to_path(self, filenum):
"""Convert a file number to the path for that file.
Returns the path without the extension.
"""
components = []
while filenum >= self.files_per_dir:
components.append(filenum % self.files_per_dir)
filenum = filenum // self.files_per_dir
components.append(filenum)
components.reverse()
result = []
for c in components[:-1]:
result.append("D%d" % c)
result.append("F%d" % components[-1])
return os.path.join(self.topdir, *result)
def _get_tree(self, filenum):
"""Get the tree for a given filenum.
"""
return self.tree_cache.get(self._num_to_path(filenum))
def set(self, record):
"""Set a record.
`record` should be an lxml Element object, or a unicode string
containing raw XML. The record must have an "id" attribute containing
the ID to use.
If a record with the same id already exists, it is replaced.
"""
assert not isinstance(record, str)
if isinstance(record, unicode):
record = etree.fromstring(record)
id = record.get(u'id')
assert id is not None
filenum = self.idmap.get(id, None)
if filenum is None:
filenum = self.next_num
tree = self._get_tree(filenum)
if len(tree) >= self.items_per_file:
self.next_num += 1
filenum = self.next_num
tree = self._get_tree(filenum)
self.idmap[id] = filenum
self.idmap_modified = True
else:
tree = self._get_tree(filenum)
tree.set(record)
def remove(self, id):
"""Remove the record with a given id.
Raises KeyError if the record isn't found.
"""
filenum = self.idmap[id]
tree = self._get_tree(filenum)
tree.remove(id)
del self.idmap[id]
self.idmap_modified = True
def get(self, id):
"""Get the record with a given id.
Raises KeyError if the record isn't found.
"""
filenum = self.idmap[id]
tree = self._get_tree(filenum)
return tree.get(id)
def get_meta(self, name):
"""Get a metadata value.
Raises KeyError if the value isn't found.
"""
assert isinstance(name, unicode)
return self.meta[name]
def set_meta(self, name, value):
"""Set a metadata value.
"""
assert isinstance(name, unicode)
assert isinstance(value, unicode)
self.meta[name] = value
self.idmap_modified = True
def del_meta(self, name):
"""Delete a metadata value.
"""
assert isinstance(name, unicode)
del self.meta[name]
self.idmap_modified = True | 0.519765 | 0.181463 |
from rdkit import Chem
from rdkit.Chem import Lipinski as LPK
import pandas as pd
def CalculateMolWeight(mol):
"""
Calculation of molecular weight. Note that not including H
Parameters:
mol: rdkit molecule
Returns:
MolWeight: Molecular weight
"""
MolWeight = 0
for atom in mol.GetAtoms():
MolWeight = MolWeight + atom.GetMass()
return MolWeight
def CalculateAverageMolWeight(mol):
"""
Calculation of average molecular weight. Note that not including H
Parameters:
mol: rdkit molecule
Returns:
AvgMolWeight: Average Molecular weight
"""
MolWeight = 0
for atom in mol.GetAtoms():
MolWeight = MolWeight + atom.GetMass()
return MolWeight / mol.GetNumAtoms()
def CalculateHydrogenNumber(mol):
"""
Calculation of Number of Hydrogen in a molecule
Parameters:
mol: rdkit molecule
Returns:
HydrogenNumber
"""
i = 0
Hmol = Chem.AddHs(mol)
for atom in Hmol.GetAtoms():
if atom.GetAtomicNum() == 1:
i = i + 1
return i
def CalculateHalogenNumber(mol):
"""
Calculation of Halogen counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
HalogenNumber
"""
i = 0
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == 9 or atom.GetAtomicNum() == 17 or atom.GetAtomicNum() == 35 or atom.GetAtomicNum() == 53:
i = i + 1
return i
def CalculateHeteroNumber(mol):
"""
Calculation of Hetero counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
HeteroNumber
"""
i = 0
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == 6 or atom.GetAtomicNum() == 1:
i = i + 1
return mol.GetNumAtoms() - i
def CalculateHeavyAtomNumber(mol):
"""
Calculation of Heavy atom counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Heavy Atom Number
"""
return mol.GetNumHeavyAtoms()
def _CalculateElementNumber(mol, AtomicNumber=6):
"""
**Internal used only**
Calculation of element counts with atomic number equal to n in a molecule
"""
i = 0
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == AtomicNumber:
i = i + 1
return i
def CalculateFlorineNumber(mol):
"""
Calculation of Florine count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Florine Number
"""
return _CalculateElementNumber(mol, AtomicNumber=9)
def CalculateChlorineNumber(mol):
"""
Calculation of Chlorine count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Chlorine Number
"""
return _CalculateElementNumber(mol, AtomicNumber=17)
def CalculateBromineNumber(mol):
"""
Calculation of Bromine counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Bromine Number
"""
return _CalculateElementNumber(mol, AtomicNumber=35)
def CalculateIodineNumber(mol):
"""
Calculation of Iodine counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Iodine Number
"""
return _CalculateElementNumber(mol, AtomicNumber=53)
def CalculateCarbonNumber(mol):
"""
Calculation of Carbon number in a molecule
Parameters:
mol: rdkit molecule
Returns:
Carbon Number
"""
return _CalculateElementNumber(mol, AtomicNumber=6)
def CalculatePhosphorNumber(mol):
"""
Calculation of Phosphorus number in a molecule
Parameters:
mol: rdkit molecule
Returns:
Heavy Atom Number
"""
return _CalculateElementNumber(mol, AtomicNumber=15)
def CalculateSulfurNumber(mol):
"""
Calculation of Sulfur count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Sulfur Number
"""
return _CalculateElementNumber(mol, AtomicNumber=16)
def CalculateOxygenNumber(mol):
"""
Calculation of Oxygen count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Oxygen Number
"""
return _CalculateElementNumber(mol, AtomicNumber=8)
def CalculateNitrogenNumber(mol):
"""
Calculation of Nitrogen count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Nitrogen Number
"""
return _CalculateElementNumber(mol, AtomicNumber=7)
def CalculateRingNumber(mol):
"""
Calculation of ring counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Ring Number
"""
return Chem.GetSSSR(mol)
def CalculateRotationBondNumber(mol):
"""
Calculation of rotation bonds count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Rotation Bond Number
"""
return LPK.NumRotatableBonds(mol)
def CalculateHdonorNumber(mol):
"""
Calculation of Hydrongen bond donor count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Hdonor Number
"""
return LPK.NumHDonors(mol)
def CalculateHacceptorNumber(mol):
"""
Calculation of Hydrogen bond acceptor count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Hacceptor Number
"""
return LPK.NumHAcceptors(mol)
def CalculateSingleBondNumber(mol):
"""
Calculation of single bond counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Single Bond Number
"""
i = 0;
for bond in mol.GetBonds():
if bond.GetBondType().name == 'SINGLE':
i = i + 1
return i
def CalculateDoubleBondNumber(mol):
"""
Calculation of double bond counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Double Bond Number
"""
i = 0;
for bond in mol.GetBonds():
if bond.GetBondType().name == 'DOUBLE':
i = i + 1
return i
def CalculateTripleBondNumber(mol):
"""
Calculation of triple bond counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Triple Bond Number
"""
i = 0;
for bond in mol.GetBonds():
if bond.GetBondType().name == 'TRIPLE':
i = i + 1
return i
def CalculateAromaticBondNumber(mol):
"""
Calculation of aromatic bond counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Aromatic Bond Number
"""
i = 0;
for bond in mol.GetBonds():
if bond.GetBondType().name == 'AROMATIC':
i = i + 1
return i
def CalculateAllAtomNumber(mol):
"""
Calculation of all atom counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
All Atom Count
"""
return Chem.AddHs(mol).GetNumAtoms()
def _CalculatePathN(mol, PathLength=2):
"""
*Internal Use Only*
Calculation of the counts of path length N for a molecule
"""
return len(Chem.FindAllPathsOfLengthN(mol, PathLength, useBonds=1))
def CalculatePath1(mol):
"""
Calculation of the counts of path length 1 for a molecule
"""
return _CalculatePathN(mol, 1)
def CalculatePath2(mol):
"""
Calculation of the counts of path length 2 for a molecule
"""
return _CalculatePathN(mol, 2)
def CalculatePath3(mol):
"""
Calculation of the counts of path length 3 for a molecule
"""
return _CalculatePathN(mol, 3)
def CalculatePath4(mol):
"""
Calculation of the counts of path length 4 for a molecule
"""
return _CalculatePathN(mol, 4)
def CalculatePath5(mol):
"""
Calculation of the counts of path length 5 for a molecule
"""
return _CalculatePathN(mol, 5)
def CalculatePath6(mol):
"""
Calculation of the counts of path length 6 for a molecule
"""
return _CalculatePathN(mol, 6)
_constitutional = {'Weight': CalculateMolWeight,
'AWeight': CalculateAverageMolWeight,
'nhyd': CalculateHydrogenNumber,
'nhal': CalculateHalogenNumber,
'nhet': CalculateHeteroNumber,
'nhev': CalculateHeavyAtomNumber,
'ncof': CalculateFlorineNumber,
'ncocl': CalculateChlorineNumber,
'ncobr': CalculateBromineNumber,
'ncoi': CalculateIodineNumber,
'ncarb': CalculateCarbonNumber,
'nphos': CalculatePhosphorNumber,
'nsulph': CalculateOxygenNumber,
'noxy': CalculateOxygenNumber,
'nnitro': CalculateNitrogenNumber,
'nring': CalculateRingNumber,
'nrot': CalculateRotationBondNumber,
'ndonr': CalculateHdonorNumber,
'naccr': CalculateHacceptorNumber,
'nsb': CalculateSingleBondNumber,
'ndb': CalculateDoubleBondNumber,
'naro': CalculateAromaticBondNumber,
'ntb': CalculateTripleBondNumber,
'nta': CalculateAllAtomNumber,
'PC1': CalculatePath1,
'PC2': CalculatePath2,
'PC3': CalculatePath3,
'PC4': CalculatePath4,
'PC5': CalculatePath5,
'PC6': CalculatePath6
}
def GetConstitutionalofMol(mol):
"""
Get the dictionary of constitutional descriptors for given molecule mol
Parameters:
mol: rdkit molecule
Returns:
constitution descriptors: dict
"""
result = {}
for DesLabel in _constitutional.keys():
result[DesLabel] = round(_constitutional[DesLabel](mol), 3)
return result
def getConstitutional(df_x):
"""
Calculates all constitutional descriptors for the dataset
Parameters:
df_x: pandas.DataFrame
SMILES DataFrame
Returns:
constitutional_descriptors: pandas.DataFrame
Constitutional Descriptors DataFrame
"""
r = {}
for key in _constitutional.keys():
r[key] = []
for m in df_x['SMILES']:
mol = Chem.MolFromSmiles(m)
res = GetConstitutionalofMol(mol)
for key in _constitutional.keys():
r[key].append(res[key])
constitutional_descriptors = pd.DataFrame(r).round(3)
return pd.DataFrame(constitutional_descriptors) | smdt/descriptors/constitution.py | from rdkit import Chem
from rdkit.Chem import Lipinski as LPK
import pandas as pd
def CalculateMolWeight(mol):
"""
Calculation of molecular weight. Note that not including H
Parameters:
mol: rdkit molecule
Returns:
MolWeight: Molecular weight
"""
MolWeight = 0
for atom in mol.GetAtoms():
MolWeight = MolWeight + atom.GetMass()
return MolWeight
def CalculateAverageMolWeight(mol):
"""
Calculation of average molecular weight. Note that not including H
Parameters:
mol: rdkit molecule
Returns:
AvgMolWeight: Average Molecular weight
"""
MolWeight = 0
for atom in mol.GetAtoms():
MolWeight = MolWeight + atom.GetMass()
return MolWeight / mol.GetNumAtoms()
def CalculateHydrogenNumber(mol):
"""
Calculation of Number of Hydrogen in a molecule
Parameters:
mol: rdkit molecule
Returns:
HydrogenNumber
"""
i = 0
Hmol = Chem.AddHs(mol)
for atom in Hmol.GetAtoms():
if atom.GetAtomicNum() == 1:
i = i + 1
return i
def CalculateHalogenNumber(mol):
"""
Calculation of Halogen counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
HalogenNumber
"""
i = 0
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == 9 or atom.GetAtomicNum() == 17 or atom.GetAtomicNum() == 35 or atom.GetAtomicNum() == 53:
i = i + 1
return i
def CalculateHeteroNumber(mol):
"""
Calculation of Hetero counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
HeteroNumber
"""
i = 0
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == 6 or atom.GetAtomicNum() == 1:
i = i + 1
return mol.GetNumAtoms() - i
def CalculateHeavyAtomNumber(mol):
"""
Calculation of Heavy atom counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Heavy Atom Number
"""
return mol.GetNumHeavyAtoms()
def _CalculateElementNumber(mol, AtomicNumber=6):
"""
**Internal used only**
Calculation of element counts with atomic number equal to n in a molecule
"""
i = 0
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == AtomicNumber:
i = i + 1
return i
def CalculateFlorineNumber(mol):
"""
Calculation of Florine count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Florine Number
"""
return _CalculateElementNumber(mol, AtomicNumber=9)
def CalculateChlorineNumber(mol):
"""
Calculation of Chlorine count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Chlorine Number
"""
return _CalculateElementNumber(mol, AtomicNumber=17)
def CalculateBromineNumber(mol):
"""
Calculation of Bromine counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Bromine Number
"""
return _CalculateElementNumber(mol, AtomicNumber=35)
def CalculateIodineNumber(mol):
"""
Calculation of Iodine counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Iodine Number
"""
return _CalculateElementNumber(mol, AtomicNumber=53)
def CalculateCarbonNumber(mol):
"""
Calculation of Carbon number in a molecule
Parameters:
mol: rdkit molecule
Returns:
Carbon Number
"""
return _CalculateElementNumber(mol, AtomicNumber=6)
def CalculatePhosphorNumber(mol):
"""
Calculation of Phosphorus number in a molecule
Parameters:
mol: rdkit molecule
Returns:
Heavy Atom Number
"""
return _CalculateElementNumber(mol, AtomicNumber=15)
def CalculateSulfurNumber(mol):
"""
Calculation of Sulfur count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Sulfur Number
"""
return _CalculateElementNumber(mol, AtomicNumber=16)
def CalculateOxygenNumber(mol):
"""
Calculation of Oxygen count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Oxygen Number
"""
return _CalculateElementNumber(mol, AtomicNumber=8)
def CalculateNitrogenNumber(mol):
"""
Calculation of Nitrogen count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Nitrogen Number
"""
return _CalculateElementNumber(mol, AtomicNumber=7)
def CalculateRingNumber(mol):
"""
Calculation of ring counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Ring Number
"""
return Chem.GetSSSR(mol)
def CalculateRotationBondNumber(mol):
"""
Calculation of rotation bonds count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Rotation Bond Number
"""
return LPK.NumRotatableBonds(mol)
def CalculateHdonorNumber(mol):
"""
Calculation of Hydrongen bond donor count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Hdonor Number
"""
return LPK.NumHDonors(mol)
def CalculateHacceptorNumber(mol):
"""
Calculation of Hydrogen bond acceptor count in a molecule
Parameters:
mol: rdkit molecule
Returns:
Hacceptor Number
"""
return LPK.NumHAcceptors(mol)
def CalculateSingleBondNumber(mol):
"""
Calculation of single bond counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Single Bond Number
"""
i = 0;
for bond in mol.GetBonds():
if bond.GetBondType().name == 'SINGLE':
i = i + 1
return i
def CalculateDoubleBondNumber(mol):
"""
Calculation of double bond counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Double Bond Number
"""
i = 0;
for bond in mol.GetBonds():
if bond.GetBondType().name == 'DOUBLE':
i = i + 1
return i
def CalculateTripleBondNumber(mol):
"""
Calculation of triple bond counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Triple Bond Number
"""
i = 0;
for bond in mol.GetBonds():
if bond.GetBondType().name == 'TRIPLE':
i = i + 1
return i
def CalculateAromaticBondNumber(mol):
"""
Calculation of aromatic bond counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
Aromatic Bond Number
"""
i = 0;
for bond in mol.GetBonds():
if bond.GetBondType().name == 'AROMATIC':
i = i + 1
return i
def CalculateAllAtomNumber(mol):
"""
Calculation of all atom counts in a molecule
Parameters:
mol: rdkit molecule
Returns:
All Atom Count
"""
return Chem.AddHs(mol).GetNumAtoms()
def _CalculatePathN(mol, PathLength=2):
"""
*Internal Use Only*
Calculation of the counts of path length N for a molecule
"""
return len(Chem.FindAllPathsOfLengthN(mol, PathLength, useBonds=1))
def CalculatePath1(mol):
"""
Calculation of the counts of path length 1 for a molecule
"""
return _CalculatePathN(mol, 1)
def CalculatePath2(mol):
"""
Calculation of the counts of path length 2 for a molecule
"""
return _CalculatePathN(mol, 2)
def CalculatePath3(mol):
"""
Calculation of the counts of path length 3 for a molecule
"""
return _CalculatePathN(mol, 3)
def CalculatePath4(mol):
"""
Calculation of the counts of path length 4 for a molecule
"""
return _CalculatePathN(mol, 4)
def CalculatePath5(mol):
"""
Calculation of the counts of path length 5 for a molecule
"""
return _CalculatePathN(mol, 5)
def CalculatePath6(mol):
"""
Calculation of the counts of path length 6 for a molecule
"""
return _CalculatePathN(mol, 6)
_constitutional = {'Weight': CalculateMolWeight,
'AWeight': CalculateAverageMolWeight,
'nhyd': CalculateHydrogenNumber,
'nhal': CalculateHalogenNumber,
'nhet': CalculateHeteroNumber,
'nhev': CalculateHeavyAtomNumber,
'ncof': CalculateFlorineNumber,
'ncocl': CalculateChlorineNumber,
'ncobr': CalculateBromineNumber,
'ncoi': CalculateIodineNumber,
'ncarb': CalculateCarbonNumber,
'nphos': CalculatePhosphorNumber,
'nsulph': CalculateOxygenNumber,
'noxy': CalculateOxygenNumber,
'nnitro': CalculateNitrogenNumber,
'nring': CalculateRingNumber,
'nrot': CalculateRotationBondNumber,
'ndonr': CalculateHdonorNumber,
'naccr': CalculateHacceptorNumber,
'nsb': CalculateSingleBondNumber,
'ndb': CalculateDoubleBondNumber,
'naro': CalculateAromaticBondNumber,
'ntb': CalculateTripleBondNumber,
'nta': CalculateAllAtomNumber,
'PC1': CalculatePath1,
'PC2': CalculatePath2,
'PC3': CalculatePath3,
'PC4': CalculatePath4,
'PC5': CalculatePath5,
'PC6': CalculatePath6
}
def GetConstitutionalofMol(mol):
"""
Get the dictionary of constitutional descriptors for given molecule mol
Parameters:
mol: rdkit molecule
Returns:
constitution descriptors: dict
"""
result = {}
for DesLabel in _constitutional.keys():
result[DesLabel] = round(_constitutional[DesLabel](mol), 3)
return result
def getConstitutional(df_x):
"""
Calculates all constitutional descriptors for the dataset
Parameters:
df_x: pandas.DataFrame
SMILES DataFrame
Returns:
constitutional_descriptors: pandas.DataFrame
Constitutional Descriptors DataFrame
"""
r = {}
for key in _constitutional.keys():
r[key] = []
for m in df_x['SMILES']:
mol = Chem.MolFromSmiles(m)
res = GetConstitutionalofMol(mol)
for key in _constitutional.keys():
r[key].append(res[key])
constitutional_descriptors = pd.DataFrame(r).round(3)
return pd.DataFrame(constitutional_descriptors) | 0.841305 | 0.640383 |
from __future__ import print_function
import argparse
import os.path
import models.examples as ex
from config import cfg
from generic_op import *
from midap_simulator import *
from midap_software import Compiler, MidapModel
def parse():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_shape', nargs='+', type=int, required=True)
parser.add_argument('-oc', '--out_chan', type=int, required=True)
parser.add_argument('-k', '--kern_info', nargs='+', type=int, required=True)
parser.add_argument('-l', '--layer_compiler', type=str, choices=['MIN_DRAM_ACCESS', 'HIDE_DRAM_LATENCY'], default='HIDE_DRAM_LATENCY')
parser.add_argument('-ib', '--init_banks', type=int, default=0)
parser.add_argument('-b', '--bus_policy', type=str, choices=['WMEM_FIRST', 'FIFO'], default='WMEM_FIRST')
parser.add_argument('-o', '--output_dir', type=str, default=None)
parser.add_argument('-da', '--disable_abstract_layer', action="store_true", default=False)
parser.add_argument('-f', '--fmem_entries', type=int, default=256)
parser.add_argument('-nb', '--num_banks', type=int, default=4)
parser.add_argument('--latency', type=int, default=100)
parser.add_argument('--bandwidth', type=int, default=32)
return parser.parse_args()
class TestWrapper(object):
def __init__(self):
self.cv = GenericConvertor()
self.midap_model = MidapModel()
self.cm = Compiler()
self.midap_simulator = MidapManager()
self.step_checker = [0, 0, 0]
def setup_from_builder(self, builder):
odict = builder.get_operator_dict()
self.cv.operator_dict = odict
self.cv.post_process()
self.midap_model.from_generic_op_dict(odict)
self.step_checker[0] = 1
if self.step_checker[1] > 0:
del self.cm
self.cm = Compiler()
self.step_checker[1] = 0
def compile(self, num_init_banks):
if self.step_checker[0] == 0:
print("Please setup the model first")
return
self.cm.force_setup(num_init_banks)
static_info = self.cm.compile(self.midap_model)
self.step_checker[1] = 1
if self.step_checker[2] > 0:
del self.midap_simulator
self.midap_simulator = MidapManager()
self.step_checker[2] = 0
return static_info
def simulate(self):
if self.step_checker[0] == 0:
print("Please setup the model first")
return
elif self.step_checker[1] == 0:
print("Please run compile")
return
input_tensor_list, path_info = self.cm.control_info
init_layer_list = self.midap_model.init_layer
_ = self.midap_simulator.process_network_with_multiple_input(input_tensor_list, init_layer_list, path_info)
self.step_checker[2] = 1
return path_info
def run_all(self, model, output_dir=None, output_option=(True, False, False, False)):
self.__init__()
self.setup_from_builder(model)
model = model.name
self.logger.info("[ {} ]".format(model))
_ = self.compile()
sim_instruction, stat = self.simulate()
diff, latency, feature_dram, weight_dram = stat
# print("check stat(Checking info) of network {}: {}".format(model ,stat), file=sys.stderr)
if diff > 0:
self.logger.error(
"Network Result Diff > 0: Functional Problem may occur, network {}".format(model))
self.midap_simulator.stats.print_result(sim_instruction.processing_order, model)
args = parse()
cfg.MIDAP.CONTROL_STRATEGY.LAYER_COMPILER = args.layer_compiler
cfg.MIDAP.BUS_POLICY = args.bus_policy
cfg.MODEL.ALLOW_ABSTRACT_DATA = not args.disable_abstract_layer
cfg.MODEL.REDUCTION_LOGIC = True
# Configuration
cfg.MIDAP.SYSTEM_WIDTH = 64
# cfg.MIDAP.FMEM.SIZE = 256 * 1024
cfg.MIDAP.FMEM.NUM_ENTRIES = args.fmem_entries * 1024
cfg.MIDAP.FMEM.NUM = args.num_banks
cfg.SYSTEM.BANDWIDTH = args.bandwidth # GB ( * 10^9 byte) / s
cfg.LATENCY.DRAM_READ = args.latency
output_dir = args.output_dir
tr = TestWrapper()
mb = ex.one_layer_example(args.input_shape, args.out_chan, args.kern_info)
tr.run_all("custom", mb, args.init_banks, output_dir=output_dir) | one_layer_test.py | from __future__ import print_function
import argparse
import os.path
import models.examples as ex
from config import cfg
from generic_op import *
from midap_simulator import *
from midap_software import Compiler, MidapModel
def parse():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_shape', nargs='+', type=int, required=True)
parser.add_argument('-oc', '--out_chan', type=int, required=True)
parser.add_argument('-k', '--kern_info', nargs='+', type=int, required=True)
parser.add_argument('-l', '--layer_compiler', type=str, choices=['MIN_DRAM_ACCESS', 'HIDE_DRAM_LATENCY'], default='HIDE_DRAM_LATENCY')
parser.add_argument('-ib', '--init_banks', type=int, default=0)
parser.add_argument('-b', '--bus_policy', type=str, choices=['WMEM_FIRST', 'FIFO'], default='WMEM_FIRST')
parser.add_argument('-o', '--output_dir', type=str, default=None)
parser.add_argument('-da', '--disable_abstract_layer', action="store_true", default=False)
parser.add_argument('-f', '--fmem_entries', type=int, default=256)
parser.add_argument('-nb', '--num_banks', type=int, default=4)
parser.add_argument('--latency', type=int, default=100)
parser.add_argument('--bandwidth', type=int, default=32)
return parser.parse_args()
class TestWrapper(object):
def __init__(self):
self.cv = GenericConvertor()
self.midap_model = MidapModel()
self.cm = Compiler()
self.midap_simulator = MidapManager()
self.step_checker = [0, 0, 0]
def setup_from_builder(self, builder):
odict = builder.get_operator_dict()
self.cv.operator_dict = odict
self.cv.post_process()
self.midap_model.from_generic_op_dict(odict)
self.step_checker[0] = 1
if self.step_checker[1] > 0:
del self.cm
self.cm = Compiler()
self.step_checker[1] = 0
def compile(self, num_init_banks):
if self.step_checker[0] == 0:
print("Please setup the model first")
return
self.cm.force_setup(num_init_banks)
static_info = self.cm.compile(self.midap_model)
self.step_checker[1] = 1
if self.step_checker[2] > 0:
del self.midap_simulator
self.midap_simulator = MidapManager()
self.step_checker[2] = 0
return static_info
def simulate(self):
if self.step_checker[0] == 0:
print("Please setup the model first")
return
elif self.step_checker[1] == 0:
print("Please run compile")
return
input_tensor_list, path_info = self.cm.control_info
init_layer_list = self.midap_model.init_layer
_ = self.midap_simulator.process_network_with_multiple_input(input_tensor_list, init_layer_list, path_info)
self.step_checker[2] = 1
return path_info
def run_all(self, model, output_dir=None, output_option=(True, False, False, False)):
self.__init__()
self.setup_from_builder(model)
model = model.name
self.logger.info("[ {} ]".format(model))
_ = self.compile()
sim_instruction, stat = self.simulate()
diff, latency, feature_dram, weight_dram = stat
# print("check stat(Checking info) of network {}: {}".format(model ,stat), file=sys.stderr)
if diff > 0:
self.logger.error(
"Network Result Diff > 0: Functional Problem may occur, network {}".format(model))
self.midap_simulator.stats.print_result(sim_instruction.processing_order, model)
args = parse()
cfg.MIDAP.CONTROL_STRATEGY.LAYER_COMPILER = args.layer_compiler
cfg.MIDAP.BUS_POLICY = args.bus_policy
cfg.MODEL.ALLOW_ABSTRACT_DATA = not args.disable_abstract_layer
cfg.MODEL.REDUCTION_LOGIC = True
# Configuration
cfg.MIDAP.SYSTEM_WIDTH = 64
# cfg.MIDAP.FMEM.SIZE = 256 * 1024
cfg.MIDAP.FMEM.NUM_ENTRIES = args.fmem_entries * 1024
cfg.MIDAP.FMEM.NUM = args.num_banks
cfg.SYSTEM.BANDWIDTH = args.bandwidth # GB ( * 10^9 byte) / s
cfg.LATENCY.DRAM_READ = args.latency
output_dir = args.output_dir
tr = TestWrapper()
mb = ex.one_layer_example(args.input_shape, args.out_chan, args.kern_info)
tr.run_all("custom", mb, args.init_banks, output_dir=output_dir) | 0.387343 | 0.105211 |