| code | apis | extract_api |
|---|---|---|
"""Support for CO2 sensor connected to a serial port."""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import (
ATTR_TEMPERATURE, CONF_NAME, CONF_MONITORED_CONDITIONS, TEMP_FAHRENHEIT)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.util.temperature import celsius_to_fahrenheit
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_SERIAL_DEVICE = 'serial_device'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=10)
DEFAULT_NAME = 'CO2 Sensor'
ATTR_CO2_CONCENTRATION = 'co2_concentration'
SENSOR_TEMPERATURE = 'temperature'
SENSOR_CO2 = 'co2'
SENSOR_TYPES = {
SENSOR_TEMPERATURE: ['Temperature', None],
SENSOR_CO2: ['CO2', 'ppm']
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_SERIAL_DEVICE): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=[SENSOR_CO2]):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available CO2 sensors."""
from pmsensor import co2sensor
try:
co2sensor.read_mh_z19(config.get(CONF_SERIAL_DEVICE))
except OSError as err:
_LOGGER.error("Could not open serial connection to %s (%s)",
config.get(CONF_SERIAL_DEVICE), err)
return False
SENSOR_TYPES[SENSOR_TEMPERATURE][1] = hass.config.units.temperature_unit
data = MHZClient(co2sensor, config.get(CONF_SERIAL_DEVICE))
dev = []
name = config.get(CONF_NAME)
for variable in config[CONF_MONITORED_CONDITIONS]:
dev.append(
MHZ19Sensor(data, variable, SENSOR_TYPES[variable][1], name))
add_entities(dev, True)
return True
class MHZ19Sensor(Entity):
"""Representation of an CO2 sensor."""
def __init__(self, mhz_client, sensor_type, temp_unit, name):
"""Initialize a new PM sensor."""
self._mhz_client = mhz_client
self._sensor_type = sensor_type
self._temp_unit = temp_unit
self._name = name
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._ppm = None
self._temperature = None
@property
def name(self):
"""Return the name of the sensor."""
return '{}: {}'.format(self._name, SENSOR_TYPES[self._sensor_type][0])
@property
def state(self):
"""Return the state of the sensor."""
return self._ppm if self._sensor_type == SENSOR_CO2 \
else self._temperature
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Read from sensor and update the state."""
self._mhz_client.update()
data = self._mhz_client.data
self._temperature = data.get(SENSOR_TEMPERATURE)
if self._temperature is not None and \
self._temp_unit == TEMP_FAHRENHEIT:
self._temperature = round(
celsius_to_fahrenheit(self._temperature), 1)
self._ppm = data.get(SENSOR_CO2)
@property
def device_state_attributes(self):
"""Return the state attributes."""
result = {}
if self._sensor_type == SENSOR_TEMPERATURE and self._ppm is not None:
result[ATTR_CO2_CONCENTRATION] = self._ppm
if self._sensor_type == SENSOR_CO2 and self._temperature is not None:
result[ATTR_TEMPERATURE] = self._temperature
return result
class MHZClient:
"""Get the latest data from the MH-Z sensor."""
def __init__(self, co2sensor, serial):
"""Initialize the sensor."""
self.co2sensor = co2sensor
self._serial = serial
self.data = dict()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data the MH-Z19 sensor."""
self.data = {}
try:
result = self.co2sensor.read_mh_z19_with_temperature(self._serial)
if result is None:
return
co2, temperature = result
except OSError as err:
_LOGGER.error("Could not open serial connection to %s (%s)",
self._serial, err)
return
if temperature is not None:
self.data[SENSOR_TEMPERATURE] = temperature
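        # Treat readings outside the 0-5000 ppm range as invalid and drop them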
if co2 is not None and 0 < co2 <= 5000:
self.data[SENSOR_CO2] = co2
|
[
"voluptuous.Optional",
"homeassistant.util.temperature.celsius_to_fahrenheit",
"voluptuous.Required",
"datetime.timedelta",
"homeassistant.util.Throttle",
"voluptuous.In",
"logging.getLogger"
] |
[((518, 545), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (535, 545), False, 'import logging\n'), ((611, 632), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (620, 632), False, 'from datetime import timedelta\n'), ((3965, 3999), 'homeassistant.util.Throttle', 'Throttle', (['MIN_TIME_BETWEEN_UPDATES'], {}), '(MIN_TIME_BETWEEN_UPDATES)\n', (3973, 3999), False, 'from homeassistant.util import Throttle\n'), ((907, 952), 'voluptuous.Optional', 'vol.Optional', (['CONF_NAME'], {'default': 'DEFAULT_NAME'}), '(CONF_NAME, default=DEFAULT_NAME)\n', (919, 952), True, 'import voluptuous as vol\n'), ((969, 1001), 'voluptuous.Required', 'vol.Required', (['CONF_SERIAL_DEVICE'], {}), '(CONF_SERIAL_DEVICE)\n', (981, 1001), True, 'import voluptuous as vol\n'), ((1018, 1079), 'voluptuous.Optional', 'vol.Optional', (['CONF_MONITORED_CONDITIONS'], {'default': '[SENSOR_CO2]'}), '(CONF_MONITORED_CONDITIONS, default=[SENSOR_CO2])\n', (1030, 1079), True, 'import voluptuous as vol\n'), ((1114, 1134), 'voluptuous.In', 'vol.In', (['SENSOR_TYPES'], {}), '(SENSOR_TYPES)\n', (1120, 1134), True, 'import voluptuous as vol\n'), ((3222, 3262), 'homeassistant.util.temperature.celsius_to_fahrenheit', 'celsius_to_fahrenheit', (['self._temperature'], {}), '(self._temperature)\n', (3243, 3262), False, 'from homeassistant.util.temperature import celsius_to_fahrenheit\n')]
|
# Generated by Django 3.1 on 2020-09-08 09:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('package', '0003_auto_20200828_1838'),
]
operations = [
migrations.AlterField(
model_name='gaduurpackage',
name='shipping_type',
field=models.TextField(blank=True, max_length=10, null=True),
),
]
|
[
"django.db.models.TextField"
] |
[((363, 417), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (379, 417), False, 'from django.db import migrations, models\n')]
|
#!/opt/anaconda3/envs/py37/bin/python
import numpy as np
import twd97
import sys
from cntr_kml import cntr_kml
from pyproj import Proj
import rasterio
fname = sys.argv[1]
img = rasterio.open(fname)
data=np.flip(img.read()[0,:,:],[0])
l,b,r,t=img.bounds[:]
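# If the midpoint of the raster bounds equals the dataset's lon/lat centre,
# the file is already in geographic (lon/lat) coordinates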
LL=False
if (l+r)/2==img.lnglat()[0]:LL=True
x0,y0=img.xy(0,0)
nx,ny=img.width, img.height
dx,dy=(r-l)/nx,-(t-b)/ny
x = np.array([x0+dx*i for i in range(nx)])
y = np.array([y0+dy*i for i in range(ny)])
y.sort()
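# Build 2-D coordinate grids; projected (TWD97) rasters are converted to WGS84
# lon/lat through a local Lambert conformal conic projection before contouring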
if LL:
lon, lat = np.meshgrid(x, y)
else:
x_g, y_g = np.meshgrid(x, y)
Xcent,Ycent=(x[0]+x[-1])/2, (y[0]+y[-1])/2
Latitude_Pole, Longitude_Pole=twd97.towgs84(Xcent, Ycent)
pnyc = Proj(proj='lcc', datum='NAD83', lat_1=10, lat_2=40,
lat_0=Latitude_Pole, lon_0=Longitude_Pole, x_0=0, y_0=0.0)
xgl,ygl=x_g-Xcent, y_g-Ycent
lon,lat=pnyc(xgl, ygl, inverse=True)
result=cntr_kml(data, lon, lat, fname)
|
[
"rasterio.open",
"numpy.meshgrid",
"cntr_kml.cntr_kml",
"twd97.towgs84",
"pyproj.Proj"
] |
[((178, 198), 'rasterio.open', 'rasterio.open', (['fname'], {}), '(fname)\n', (191, 198), False, 'import rasterio\n'), ((855, 886), 'cntr_kml.cntr_kml', 'cntr_kml', (['data', 'lon', 'lat', 'fname'], {}), '(data, lon, lat, fname)\n', (863, 886), False, 'from cntr_kml import cntr_kml\n'), ((489, 506), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (500, 506), True, 'import numpy as np\n'), ((526, 543), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (537, 543), True, 'import numpy as np\n'), ((621, 648), 'twd97.towgs84', 'twd97.towgs84', (['Xcent', 'Ycent'], {}), '(Xcent, Ycent)\n', (634, 648), False, 'import twd97\n'), ((658, 772), 'pyproj.Proj', 'Proj', ([], {'proj': '"""lcc"""', 'datum': '"""NAD83"""', 'lat_1': '(10)', 'lat_2': '(40)', 'lat_0': 'Latitude_Pole', 'lon_0': 'Longitude_Pole', 'x_0': '(0)', 'y_0': '(0.0)'}), "(proj='lcc', datum='NAD83', lat_1=10, lat_2=40, lat_0=Latitude_Pole,\n lon_0=Longitude_Pole, x_0=0, y_0=0.0)\n", (662, 772), False, 'from pyproj import Proj\n')]
|
import datetime
import json
import re
from tests.base import SoupTest
from pretix.base.models import Event, Organizer, Team, User
class MailSettingPreviewTest(SoupTest):
def setUp(self):
self.user = User.objects.create_user('<EMAIL>', 'dummy')
self.orga1 = Organizer.objects.create(name='CCC', slug='ccc')
self.orga2 = Organizer.objects.create(name='MRM', slug='mrm')
self.event1 = Event.objects.create(
organizer=self.orga1, name='30C3', slug='30c3',
date_from=datetime.datetime(2013, 12, 26, tzinfo=datetime.timezone.utc),
)
# event with locale
self.locale_event = Event.objects.create(
organizer=self.orga1, name={'en': '40C4-en', 'de-informal': '40C4-de'}, slug='40c4',
date_from=datetime.datetime(2013, 12, 26, tzinfo=datetime.timezone.utc),
)
self.locale_event.settings.locales = ['en', 'de-informal']
self.locale_event.save()
t = Team.objects.create(organizer=self.orga1, can_change_items=True, can_change_event_settings=True)
t.members.add(self.user)
t.limit_events.add(self.locale_event)
t.limit_events.add(self.event1)
self.client.login(email='<EMAIL>', password='<PASSWORD>')
self.target = '/control/event/{}/{}/settings/email/preview'
def test_permission(self):
self.event2 = Event.objects.create(
organizer=self.orga2, name='30M3', slug='30m3',
date_from=datetime.datetime(2013, 12, 26, tzinfo=datetime.timezone.utc),
)
response = self.client.post(self.target.format(
self.orga2.slug, self.event2.slug), {
'test': 'test1'
})
assert response.status_code == 404
def test_missing_item_key(self):
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'item': 'dummy',
'mail_text_order_free_0': 'sss',
'mail_text_order_free_1': 'ttt'
})
assert response.status_code == 400
def test_invalid_item_field(self):
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'item': 'mail_text_order_free',
'mail_text_order_free_w': 'sss'
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_free'
assert len(res['msgs']) == 0
def test_invalid_language_index(self):
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'item': 'mail_text_order_free',
'mail_text_order_free_1': 'sss'
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_free'
assert len(res['msgs']) == 0
def test_no_item_field(self):
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'mail_text_order_free_0': 'sss'
})
assert response.status_code == 400
def test_only_en(self):
dummy_text = 'This is dummy sentence for test'
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'item': 'mail_text_order_free',
'mail_text_order_free_0': dummy_text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_free'
assert len(res['msgs']) == 1
assert res['msgs']['en'] == dummy_text
def test_multiple_languages(self):
dummy_text = 'This is dummy sentence for test'
response = self.client.post(self.target.format(
self.orga1.slug, self.locale_event.slug), {
'item': 'mail_text_order_free',
'mail_text_order_free_0': dummy_text,
'mail_text_order_free_2': dummy_text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_free'
assert len(res['msgs']) == 2
assert res['msgs']['en'] == dummy_text
assert res['msgs']['de-informal'] == dummy_text
def test_i18n_placeholders(self):
dummy_text = '{event}'
response = self.client.post(self.target.format(
self.orga1.slug, self.locale_event.slug), {
'item': 'mail_text_order_placed',
'mail_text_order_placed_0': dummy_text,
'mail_text_order_placed_2': dummy_text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_placed'
assert len(res['msgs']) == 2
assert res['msgs']['en'] == self.locale_event.name['en']
assert res['msgs']['de-informal'] == self.locale_event.name['de-informal']
def test_i18n_locale_order(self):
self.locale_event.settings.locales = ['de-informal', 'en']
self.locale_event.save()
dummy_text = '{event}'
response = self.client.post(self.target.format(
self.orga1.slug, self.locale_event.slug), {
'item': 'mail_text_order_placed',
'mail_text_order_placed_0': dummy_text,
'mail_text_order_placed_2': dummy_text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_placed'
assert len(res['msgs']) == 2
assert res['msgs']['de-informal'] == self.locale_event.name['de-informal']
assert res['msgs']['en'] == self.locale_event.name['en']
def test_mail_text_order_placed(self):
text = '{event}{total}{currency}{date}{payment_info}{url}{invoice_name}{invoice_company}'
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'item': 'mail_text_order_placed',
'mail_text_order_placed_0': text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_placed'
assert len(res['msgs']) == 1
assert re.match('.*{.*}.*', res['msgs']['en']) is None
def test_mail_text_order_paid(self):
text = '{event}{url}{invoice_name}{invoice_company}{payment_info}'
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'item': 'mail_text_order_paid',
'mail_text_order_paid_0': text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_paid'
assert len(res['msgs']) == 1
assert re.match('.*{.*}.*', res['msgs']['en']) is None
def test_mail_text_order_free(self):
text = '{event}{url}{invoice_name}{invoice_company}'
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'item': 'mail_text_order_free',
'mail_text_order_free_0': text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_free'
assert len(res['msgs']) == 1
assert re.match('.*{.*}.*', res['msgs']['en']) is None
def test_mail_text_resend_link(self):
text = '{event}{url}{invoice_name}{invoice_company}'
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'item': 'mail_text_resend_link',
'mail_text_resend_link_0': text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_resend_link'
assert len(res['msgs']) == 1
assert re.match('.*{.*}.*', res['msgs']['en']) is None
def test_mail_text_resend_all_links(self):
text = '{event}{orders}'
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'item': 'mail_text_resend_all_links',
'mail_text_resend_all_links_0': text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_resend_all_links'
assert len(res['msgs']) == 1
assert re.match('.*{.*}.*', res['msgs']['en']) is None
def test_mail_text_order_changed(self):
text = '{event}{url}{invoice_name}{invoice_company}'
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'item': 'mail_text_order_changed',
'mail_text_order_changed_0': text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_changed'
assert len(res['msgs']) == 1
assert re.match('.*{.*}.*', res['msgs']['en']) is None
def test_mail_text_order_expire_warning(self):
text = '{event}{url}{expire_date}{invoice_name}{invoice_company}'
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'item': 'mail_text_order_expire_warning',
'mail_text_order_expire_warning_0': text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_expire_warning'
assert len(res['msgs']) == 1
assert re.match('.*{.*}.*', res['msgs']['en']) is None
def test_mail_text_waiting_list(self):
text = '{event}{url}{product}{hours}{code}'
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'item': 'mail_text_waiting_list',
'mail_text_waiting_list_0': text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_waiting_list'
assert len(res['msgs']) == 1
assert re.match('.*{.*}.*', res['msgs']['en']) is None
def test_mail_text_order_canceled(self):
text = '{event}{code}{url}'
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'item': 'mail_text_order_canceled',
'mail_text_order_canceled_0': text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_canceled'
assert len(res['msgs']) == 1
assert re.match('.*{.*}.*', res['msgs']['en']) is None
def test_unsupported_placeholders(self):
text = '{event1}'
response = self.client.post(self.target.format(
self.orga1.slug, self.event1.slug), {
'item': 'mail_text_waiting_list',
'mail_text_waiting_list_0': text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_waiting_list'
assert len(res['msgs']) == 1
assert res['msgs']['en'] == text
def test_localised_date(self):
dummy_text = '{date}'
response = self.client.post(self.target.format(
self.orga1.slug, self.locale_event.slug), {
'item': 'mail_text_order_placed',
'mail_text_order_placed_0': dummy_text,
'mail_text_order_placed_2': dummy_text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_placed'
assert len(res['msgs']) == 2
assert res['msgs']['en'] != res['msgs']['de-informal']
def test_localised_expire_date(self):
dummy_text = '{expire_date}'
response = self.client.post(self.target.format(
self.orga1.slug, self.locale_event.slug), {
'item': 'mail_text_order_expire_warning',
'mail_text_order_expire_warning_0': dummy_text,
'mail_text_order_expire_warning_2': dummy_text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_expire_warning'
assert len(res['msgs']) == 2
assert res['msgs']['en'] != res['msgs']['de-informal']
def test_localised_payment_info(self):
dummy_text = '{payment_info}'
response = self.client.post(self.target.format(
self.orga1.slug, self.locale_event.slug), {
'item': 'mail_text_order_paid',
'mail_text_order_paid_0': dummy_text,
'mail_text_order_paid_2': dummy_text
})
assert response.status_code == 200
res = json.loads(response.content.decode())
assert res['item'] == 'mail_text_order_paid'
assert len(res['msgs']) == 2
assert res['msgs']['en'] != res['msgs']['de-informal']
|
[
"re.match",
"pretix.base.models.User.objects.create_user",
"datetime.datetime",
"pretix.base.models.Team.objects.create",
"pretix.base.models.Organizer.objects.create"
] |
[((215, 259), 'pretix.base.models.User.objects.create_user', 'User.objects.create_user', (['"""<EMAIL>"""', '"""dummy"""'], {}), "('<EMAIL>', 'dummy')\n", (239, 259), False, 'from pretix.base.models import Event, Organizer, Team, User\n'), ((281, 329), 'pretix.base.models.Organizer.objects.create', 'Organizer.objects.create', ([], {'name': '"""CCC"""', 'slug': '"""ccc"""'}), "(name='CCC', slug='ccc')\n", (305, 329), False, 'from pretix.base.models import Event, Organizer, Team, User\n'), ((351, 399), 'pretix.base.models.Organizer.objects.create', 'Organizer.objects.create', ([], {'name': '"""MRM"""', 'slug': '"""mrm"""'}), "(name='MRM', slug='mrm')\n", (375, 399), False, 'from pretix.base.models import Event, Organizer, Team, User\n'), ((981, 1081), 'pretix.base.models.Team.objects.create', 'Team.objects.create', ([], {'organizer': 'self.orga1', 'can_change_items': '(True)', 'can_change_event_settings': '(True)'}), '(organizer=self.orga1, can_change_items=True,\n can_change_event_settings=True)\n', (1000, 1081), False, 'from pretix.base.models import Event, Organizer, Team, User\n'), ((6351, 6390), 're.match', 're.match', (['""".*{.*}.*"""', "res['msgs']['en']"], {}), "('.*{.*}.*', res['msgs']['en'])\n", (6359, 6390), False, 'import re\n'), ((6920, 6959), 're.match', 're.match', (['""".*{.*}.*"""', "res['msgs']['en']"], {}), "('.*{.*}.*', res['msgs']['en'])\n", (6928, 6959), False, 'import re\n'), ((7475, 7514), 're.match', 're.match', (['""".*{.*}.*"""', "res['msgs']['en']"], {}), "('.*{.*}.*', res['msgs']['en'])\n", (7483, 7514), False, 'import re\n'), ((8034, 8073), 're.match', 're.match', (['""".*{.*}.*"""', "res['msgs']['en']"], {}), "('.*{.*}.*', res['msgs']['en'])\n", (8042, 8073), False, 'import re\n'), ((8585, 8624), 're.match', 're.match', (['""".*{.*}.*"""', "res['msgs']['en']"], {}), "('.*{.*}.*', res['msgs']['en'])\n", (8593, 8624), False, 'import re\n'), ((9152, 9191), 're.match', 're.match', (['""".*{.*}.*"""', "res['msgs']['en']"], {}), "('.*{.*}.*', res['msgs']['en'])\n", (9160, 9191), False, 'import re\n'), ((9760, 9799), 're.match', 're.match', (['""".*{.*}.*"""', "res['msgs']['en']"], {}), "('.*{.*}.*', res['msgs']['en'])\n", (9768, 9799), False, 'import re\n'), ((10314, 10353), 're.match', 're.match', (['""".*{.*}.*"""', "res['msgs']['en']"], {}), "('.*{.*}.*', res['msgs']['en'])\n", (10322, 10353), False, 'import re\n'), ((10860, 10899), 're.match', 're.match', (['""".*{.*}.*"""', "res['msgs']['en']"], {}), "('.*{.*}.*', res['msgs']['en'])\n", (10868, 10899), False, 'import re\n'), ((526, 587), 'datetime.datetime', 'datetime.datetime', (['(2013)', '(12)', '(26)'], {'tzinfo': 'datetime.timezone.utc'}), '(2013, 12, 26, tzinfo=datetime.timezone.utc)\n', (543, 587), False, 'import datetime\n'), ((796, 857), 'datetime.datetime', 'datetime.datetime', (['(2013)', '(12)', '(26)'], {'tzinfo': 'datetime.timezone.utc'}), '(2013, 12, 26, tzinfo=datetime.timezone.utc)\n', (813, 857), False, 'import datetime\n'), ((1490, 1551), 'datetime.datetime', 'datetime.datetime', (['(2013)', '(12)', '(26)'], {'tzinfo': 'datetime.timezone.utc'}), '(2013, 12, 26, tzinfo=datetime.timezone.utc)\n', (1507, 1551), False, 'import datetime\n')]
|
""" hyperparam search! :)
"""
from ray.tune.schedulers import ASHAScheduler
from ray.tune import CLIReporter
from ray import tune
import numpy as np
from functools import partial
import argparse
import music_trees as mt
SEEDS = [mt.SEED]
RANDOM_TAXONOMIES = [f'random-taxonomy-{i}' for i in range(10)]
SCRAMBLED_TAXONOMIES = [
f'scrambled-{i}' for i in range(10)]
SCRAMBLED_TAXONOMIES.insert(0, 'deeper-mdb')
# default hyperparameters go here
DEFAULTS = {
'model_name': 'hprotonet',
'height': 1,
'd_root': 128,
'loss_alpha': 1,
'loss_beta': 0.5,
'loss_weight_fn': 'exp',
'dataset': 'mdb-aug',
'num_workers': 20,
'learning_rate': 0.03
}
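# each named experiment below overrides DEFAULTS and defines a ray.tune search grid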
CONFIGS = {
'data-aug': {
'dataset': tune.grid_search(['mdb-aug', 'mdb']),
},
'height': {
'loss_alpha': NotImplemented,
'height': tune.grid_search([0, 2, 3, 4, 5]),
},
'd_root': {
'd_root': tune.grid_search([64, 128, 256, 512]),
},
'loss-interp-avg-decay': {
'loss_weight_fn': 'interp-avg-decay',
'height': 4,
'loss_alpha': tune.grid_search([0.5, 0.75, 0.9]),
'loss_beta': tune.grid_search([0.25, 0.5, 1]),
},
'loss-baseline-alpha': {
'loss_weight_fn': 'interp-avg-decay',
'height': 4,
'loss_alpha': tune.grid_search([1.0]),
'loss_beta': tune.grid_search([1.0]),
},
'loss-baseline-beta': {
'loss_weight_fn': 'interp-avg-decay',
'height': 4,
'loss_alpha': tune.grid_search([0.75]),
'loss_beta': tune.grid_search([0]),
},
'scrambled-tax': {
'taxonomy_name': tune.grid_search(SCRAMBLED_TAXONOMIES)
},
'baseline-proposed': {
'height': tune.grid_search([0, 4]),
},
'height-v1': {
'height': tune.grid_search([4, 2, 0, 3, 1]),
},
'loss-alpha': {
'height': 4,
'loss_alpha': tune.grid_search([-1, -0.5, 0, 0.5, 1]),
},
'loss-alphav2': {
'height': 4,
'loss_alpha': tune.grid_search([-4, -3, -2, -1, 1, 2, 3, 4]),
}
}
class Experiment:
def __init__(self, name: str, defaults: dict, config: dict, gpu_fraction: float):
self.name = name
self.config = config
self.hparams = argparse.Namespace(
**{k: v for k, v in defaults.items() if k not in config})
self.gpu_fraction = gpu_fraction
def hparams2args(hparams):
args = []
for k, v in vars(hparams).items():
args.append(f'--{k}')
args.append(f'{v}')
return args
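# NOTE: run_trial reads the module-level `exp` that is created in the __main__ block below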
def run_trial(config, **kwargs):
hparams = argparse.Namespace(**kwargs)
hparams.__dict__.update(config)
hparams.name = exp.name.upper() + '-' + \
f'_'.join(f"{k}={v}" for k, v in config.items())
parser = mt.train.load_parser(known_args=hparams2args(hparams))
del hparams.parent_name
del hparams.checkpoint_dir
hparams = parser.parse_args(args=hparams2args(hparams))
return mt.train.train(hparams, use_ray=True)
def run_experiment(exp):
scheduler = ASHAScheduler(
metric="f1/protonet/val",
mode="max",
max_t=mt.train.MAX_EPISODES,
grace_period=mt.train.MAX_EPISODES,
reduction_factor=2)
reporter = CLIReporter(
metric_columns=["f1/protonet/train",
"f1/protonet/val", ])
result = tune.run(
partial(run_trial, **vars(exp.hparams)),
name=exp.name,
local_dir=mt.RUNS_DIR,
resources_per_trial={"cpu": 1, "gpu": exp.gpu_fraction},
config=exp.config,
num_samples=1,
scheduler=scheduler,
progress_reporter=reporter)
df = result.results_df
df.to_csv(str(mt.train.get_exp_dir(exp.hparams.name,
exp.hparams.version)/'ray-results.csv'))
if __name__ == "__main__":
import argparse
from datetime import datetime
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--name', type=str, required=True)
parser.add_argument('--gpu_capacity', type=float, default=1.0)
args = parser.parse_args()
parent_name = args.name + '-' + datetime.now().strftime("%m.%d.%Y")
mt.RUNS_DIR = mt.RUNS_DIR / parent_name
mt.TQDM_DISABLE = True
exp = Experiment(name=args.name, defaults=DEFAULTS,
config=CONFIGS[args.name], gpu_fraction=args.gpu_capacity)
exp.hparams.parent_name = parent_name
run_experiment(exp)
|
[
"argparse.Namespace",
"music_trees.train.train",
"argparse.ArgumentParser",
"ray.tune.CLIReporter",
"ray.tune.grid_search",
"ray.tune.schedulers.ASHAScheduler",
"music_trees.train.get_exp_dir",
"datetime.datetime.now"
] |
[((2564, 2592), 'argparse.Namespace', 'argparse.Namespace', ([], {}), '(**kwargs)\n', (2582, 2592), False, 'import argparse\n'), ((2933, 2970), 'music_trees.train.train', 'mt.train.train', (['hparams'], {'use_ray': '(True)'}), '(hparams, use_ray=True)\n', (2947, 2970), True, 'import music_trees as mt\n'), ((3015, 3156), 'ray.tune.schedulers.ASHAScheduler', 'ASHAScheduler', ([], {'metric': '"""f1/protonet/val"""', 'mode': '"""max"""', 'max_t': 'mt.train.MAX_EPISODES', 'grace_period': 'mt.train.MAX_EPISODES', 'reduction_factor': '(2)'}), "(metric='f1/protonet/val', mode='max', max_t=mt.train.\n MAX_EPISODES, grace_period=mt.train.MAX_EPISODES, reduction_factor=2)\n", (3028, 3156), False, 'from ray.tune.schedulers import ASHAScheduler\n'), ((3209, 3277), 'ray.tune.CLIReporter', 'CLIReporter', ([], {'metric_columns': "['f1/protonet/train', 'f1/protonet/val']"}), "(metric_columns=['f1/protonet/train', 'f1/protonet/val'])\n", (3220, 3277), False, 'from ray.tune import CLIReporter\n'), ((3857, 3936), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (3880, 3936), False, 'import argparse\n'), ((715, 751), 'ray.tune.grid_search', 'tune.grid_search', (["['mdb-aug', 'mdb']"], {}), "(['mdb-aug', 'mdb'])\n", (731, 751), False, 'from ray import tune\n'), ((832, 865), 'ray.tune.grid_search', 'tune.grid_search', (['[0, 2, 3, 4, 5]'], {}), '([0, 2, 3, 4, 5])\n', (848, 865), False, 'from ray import tune\n'), ((908, 945), 'ray.tune.grid_search', 'tune.grid_search', (['[64, 128, 256, 512]'], {}), '([64, 128, 256, 512])\n', (924, 945), False, 'from ray import tune\n'), ((1074, 1108), 'ray.tune.grid_search', 'tune.grid_search', (['[0.5, 0.75, 0.9]'], {}), '([0.5, 0.75, 0.9])\n', (1090, 1108), False, 'from ray import tune\n'), ((1131, 1163), 'ray.tune.grid_search', 'tune.grid_search', (['[0.25, 0.5, 1]'], {}), '([0.25, 0.5, 1])\n', (1147, 1163), False, 'from ray import tune\n'), ((1290, 1313), 'ray.tune.grid_search', 'tune.grid_search', (['[1.0]'], {}), '([1.0])\n', (1306, 1313), False, 'from ray import tune\n'), ((1336, 1359), 'ray.tune.grid_search', 'tune.grid_search', (['[1.0]'], {}), '([1.0])\n', (1352, 1359), False, 'from ray import tune\n'), ((1485, 1509), 'ray.tune.grid_search', 'tune.grid_search', (['[0.75]'], {}), '([0.75])\n', (1501, 1509), False, 'from ray import tune\n'), ((1532, 1553), 'ray.tune.grid_search', 'tune.grid_search', (['[0]'], {}), '([0])\n', (1548, 1553), False, 'from ray import tune\n'), ((1610, 1648), 'ray.tune.grid_search', 'tune.grid_search', (['SCRAMBLED_TAXONOMIES'], {}), '(SCRAMBLED_TAXONOMIES)\n', (1626, 1648), False, 'from ray import tune\n'), ((1701, 1725), 'ray.tune.grid_search', 'tune.grid_search', (['[0, 4]'], {}), '([0, 4])\n', (1717, 1725), False, 'from ray import tune\n'), ((1771, 1804), 'ray.tune.grid_search', 'tune.grid_search', (['[4, 2, 0, 3, 1]'], {}), '([4, 2, 0, 3, 1])\n', (1787, 1804), False, 'from ray import tune\n'), ((1876, 1915), 'ray.tune.grid_search', 'tune.grid_search', (['[-1, -0.5, 0, 0.5, 1]'], {}), '([-1, -0.5, 0, 0.5, 1])\n', (1892, 1915), False, 'from ray import tune\n'), ((1989, 2035), 'ray.tune.grid_search', 'tune.grid_search', (['[-4, -3, -2, -1, 1, 2, 3, 4]'], {}), '([-4, -3, -2, -1, 1, 2, 3, 4])\n', (2005, 2035), False, 'from ray import tune\n'), ((3666, 3725), 'music_trees.train.get_exp_dir', 'mt.train.get_exp_dir', (['exp.hparams.name', 'exp.hparams.version'], {}), '(exp.hparams.name, exp.hparams.version)\n', 
(3686, 3725), True, 'import music_trees as mt\n'), ((4142, 4156), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4154, 4156), False, 'from datetime import datetime\n')]
|
'''Main tests in API'''
import unittest
from bokeh.plotting import figure
from model.charts.line import Line
class LineChartConfigTest():
# class LineChartConfigTest(unittest.TestCase):
    ''' Test behaviours linked to foundational line chart capabilities '''
def test_bar_chart_config_no_option(self):
''' Tests if default config is set when no option is given '''
chart = Line().chart_config(figure(), None)
self.assertEqual(chart.xaxis.visible, True)
self.assertEqual(chart.yaxis.visible, True)
def test_bar_chart_config_no_chart_option(self):
''' Tests if default config is set when no chart_option is given '''
chart = Line().chart_config(figure(), {})
self.assertEqual(chart.xaxis.visible, True)
self.assertEqual(chart.yaxis.visible, True)
def test_bar_chart_config(self):
        ''' Tests if axis visibility follows the given chart_options '''
chart = Line().chart_config(
figure(),
{'chart_options': {'show_x_axis': False, 'show_y_axis': True}})
self.assertEqual(chart.xaxis.visible, False)
self.assertEqual(chart.yaxis.visible, True)
def test_bar_chart_config_reverse_visibility(self):
        ''' Tests if axis visibility follows the given chart_options when reversed '''
chart = Line().chart_config(
figure(),
{'chart_options': {'show_x_axis': True, 'show_y_axis': False}})
self.assertEqual(chart.xaxis.visible, True)
self.assertEqual(chart.yaxis.visible, False)
|
[
"model.charts.line.Line",
"bokeh.plotting.figure"
] |
[((416, 424), 'bokeh.plotting.figure', 'figure', ([], {}), '()\n', (422, 424), False, 'from bokeh.plotting import figure\n'), ((703, 711), 'bokeh.plotting.figure', 'figure', ([], {}), '()\n', (709, 711), False, 'from bokeh.plotting import figure\n'), ((985, 993), 'bokeh.plotting.figure', 'figure', ([], {}), '()\n', (991, 993), False, 'from bokeh.plotting import figure\n'), ((1359, 1367), 'bokeh.plotting.figure', 'figure', ([], {}), '()\n', (1365, 1367), False, 'from bokeh.plotting import figure\n'), ((396, 402), 'model.charts.line.Line', 'Line', ([], {}), '()\n', (400, 402), False, 'from model.charts.line import Line\n'), ((683, 689), 'model.charts.line.Line', 'Line', ([], {}), '()\n', (687, 689), False, 'from model.charts.line import Line\n'), ((952, 958), 'model.charts.line.Line', 'Line', ([], {}), '()\n', (956, 958), False, 'from model.charts.line import Line\n'), ((1326, 1332), 'model.charts.line.Line', 'Line', ([], {}), '()\n', (1330, 1332), False, 'from model.charts.line import Line\n')]
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
from frappe.utils import formatdate
from erpnext.controllers.trends import get_period_date_ranges, get_period_month_ranges
def execute(filters=None):
if not filters: filters = {}
columns = get_columns(filters)
cost_centers = get_cost_centers(filters)
period_month_ranges = get_period_month_ranges(filters["period"], filters["fiscal_year"])
cam_map = get_cost_center_account_month_map(filters)
data = []
for cost_center in cost_centers:
cost_center_items = cam_map.get(cost_center)
if cost_center_items:
for account, monthwise_data in cost_center_items.items():
row = [cost_center, account]
totals = [0, 0, 0]
for relevant_months in period_month_ranges:
period_data = [0, 0, 0]
for month in relevant_months:
month_data = monthwise_data.get(month, {})
for i, fieldname in enumerate(["target", "actual", "variance"]):
value = flt(month_data.get(fieldname))
period_data[i] += value
totals[i] += value
period_data[2] = period_data[0] - period_data[1]
row += period_data
totals[2] = totals[0] - totals[1]
row += totals
data.append(row)
return columns, data
def get_columns(filters):
columns = [_(filters.get("budget_against")) + ":Link/%s:120"%(filters.get("budget_against")), _("Account") + ":Link/Account:120"]
group_months = False if filters["period"] == "Monthly" else True
for from_date, to_date in get_period_date_ranges(filters["period"], filters["fiscal_year"]):
for label in [_("Target") + " (%s)", _("Actual") + " (%s)", _("Variance") + " (%s)"]:
if group_months:
label = label % (formatdate(from_date, format_string="MMM") + " - " + formatdate(to_date, format_string="MMM"))
else:
label = label % formatdate(from_date, format_string="MMM")
columns.append(label+":Float:120")
return columns + [_("Total Target") + ":Float:120", _("Total Actual") + ":Float:120",
_("Total Variance") + ":Float:120"]
def get_cost_centers(filters):
cond = "and 1=1"
if filters.get("budget_against") == "Cost Center":
cond = "order by lft"
return frappe.db.sql_list("""select name from `tab{tab}` where company=%s
{cond}""".format(tab=filters.get("budget_against"), cond=cond), filters.get("company"))
#Get cost center & target details
def get_cost_center_target_details(filters):
return frappe.db.sql("""
select b.{budget_against} as budget_against, b.monthly_distribution, ba.account, ba.budget_amount
from `tabBudget` b, `tabBudget Account` ba
where b.name=ba.parent and b.docstatus = 1 and b.fiscal_year=%s
and b.budget_against = %s and b.company=%s
""".format(budget_against=filters.get("budget_against").replace(" ", "_").lower()),
(filters.fiscal_year, filters.budget_against, filters.company), as_dict=True)
#Get target distribution details of accounts of cost center
def get_target_distribution_details(filters):
target_details = {}
for d in frappe.db.sql("""select md.name, mdp.month, mdp.percentage_allocation
from `tabMonthly Distribution Percentage` mdp, `tabMonthly Distribution` md
where mdp.parent=md.name and md.fiscal_year=%s""", (filters["fiscal_year"]), as_dict=1):
target_details.setdefault(d.name, {}).setdefault(d.month, flt(d.percentage_allocation))
return target_details
#Get actual details from gl entry
def get_actual_details(name, filters):
cond = "1=1"
budget_against=filters.get("budget_against").replace(" ", "_").lower()
if filters.get("budget_against") == "Cost Center":
cc_lft, cc_rgt = frappe.db.get_value("Cost Center", name, ["lft", "rgt"])
cond = "lft>='{lft}' and rgt<='{rgt}'".format(lft = cc_lft, rgt=cc_rgt)
ac_details = frappe.db.sql("""select gl.account, gl.debit, gl.credit,
MONTHNAME(gl.posting_date) as month_name, b.{budget_against} as budget_against
from `tabGL Entry` gl, `tabBudget Account` ba, `tabBudget` b
where
b.name = ba.parent
and b.docstatus = 1
and ba.account=gl.account
and b.{budget_against} = gl.{budget_against}
and gl.fiscal_year=%s
and b.{budget_against}=%s
and exists(select name from `tab{tab}` where name=gl.{budget_against} and {cond})
""".format(tab = filters.budget_against, budget_against = budget_against, cond = cond),
(filters.fiscal_year, name), as_dict=1)
cc_actual_details = {}
for d in ac_details:
cc_actual_details.setdefault(d.account, []).append(d)
return cc_actual_details
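# Builds a nested map: {budget_against value: {account: {month name: {"target": x, "actual": y}}}}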
def get_cost_center_account_month_map(filters):
import datetime
cost_center_target_details = get_cost_center_target_details(filters)
tdd = get_target_distribution_details(filters)
cam_map = {}
for ccd in cost_center_target_details:
actual_details = get_actual_details(ccd.budget_against, filters)
for month_id in range(1, 13):
month = datetime.date(2013, month_id, 1).strftime('%B')
cam_map.setdefault(ccd.budget_against, {}).setdefault(ccd.account, {})\
.setdefault(month, frappe._dict({
"target": 0.0, "actual": 0.0
}))
tav_dict = cam_map[ccd.budget_against][ccd.account][month]
month_percentage = tdd.get(ccd.monthly_distribution, {}).get(month, 0) \
if ccd.monthly_distribution else 100.0/12
tav_dict.target = flt(ccd.budget_amount) * month_percentage / 100
for ad in actual_details.get(ccd.account, []):
if ad.month_name == month:
tav_dict.actual += flt(ad.debit) - flt(ad.credit)
return cam_map
|
[
"frappe.utils.flt",
"frappe.utils.formatdate",
"erpnext.controllers.trends.get_period_date_ranges",
"frappe.db.sql",
"frappe.db.get_value",
"datetime.date",
"erpnext.controllers.trends.get_period_month_ranges",
"frappe._dict",
"frappe._"
] |
[((511, 577), 'erpnext.controllers.trends.get_period_month_ranges', 'get_period_month_ranges', (["filters['period']", "filters['fiscal_year']"], {}), "(filters['period'], filters['fiscal_year'])\n", (534, 577), False, 'from erpnext.controllers.trends import get_period_date_ranges, get_period_month_ranges\n'), ((1632, 1697), 'erpnext.controllers.trends.get_period_date_ranges', 'get_period_date_ranges', (["filters['period']", "filters['fiscal_year']"], {}), "(filters['period'], filters['fiscal_year'])\n", (1654, 1697), False, 'from erpnext.controllers.trends import get_period_date_ranges, get_period_month_ranges\n'), ((3125, 3370), 'frappe.db.sql', 'frappe.db.sql', (['"""select md.name, mdp.month, mdp.percentage_allocation\n\t\tfrom `tabMonthly Distribution Percentage` mdp, `tabMonthly Distribution` md\n\t\twhere mdp.parent=md.name and md.fiscal_year=%s"""', "filters['fiscal_year']"], {'as_dict': '(1)'}), '(\n """select md.name, mdp.month, mdp.percentage_allocation\n\t\tfrom `tabMonthly Distribution Percentage` mdp, `tabMonthly Distribution` md\n\t\twhere mdp.parent=md.name and md.fiscal_year=%s"""\n , filters[\'fiscal_year\'], as_dict=1)\n', (3138, 3370), False, 'import frappe\n'), ((3711, 3767), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Cost Center"""', 'name', "['lft', 'rgt']"], {}), "('Cost Center', name, ['lft', 'rgt'])\n", (3730, 3767), False, 'import frappe\n'), ((1501, 1513), 'frappe._', '_', (['"""Account"""'], {}), "('Account')\n", (1502, 1513), False, 'from frappe import _\n'), ((3425, 3453), 'frappe.utils.flt', 'flt', (['d.percentage_allocation'], {}), '(d.percentage_allocation)\n', (3428, 3453), False, 'from frappe.utils import flt\n'), ((1715, 1726), 'frappe._', '_', (['"""Target"""'], {}), "('Target')\n", (1716, 1726), False, 'from frappe import _\n'), ((1738, 1749), 'frappe._', '_', (['"""Actual"""'], {}), "('Actual')\n", (1739, 1749), False, 'from frappe import _\n'), ((1761, 1774), 'frappe._', '_', (['"""Variance"""'], {}), "('Variance')\n", (1762, 1774), False, 'from frappe import _\n'), ((2054, 2071), 'frappe._', '_', (['"""Total Target"""'], {}), "('Total Target')\n", (2055, 2071), False, 'from frappe import _\n'), ((2088, 2105), 'frappe._', '_', (['"""Total Actual"""'], {}), "('Total Actual')\n", (2089, 2105), False, 'from frappe import _\n'), ((2124, 2143), 'frappe._', '_', (['"""Total Variance"""'], {}), "('Total Variance')\n", (2125, 2143), False, 'from frappe import _\n'), ((5089, 5133), 'frappe._dict', 'frappe._dict', (["{'target': 0.0, 'actual': 0.0}"], {}), "({'target': 0.0, 'actual': 0.0})\n", (5101, 5133), False, 'import frappe\n'), ((1952, 1994), 'frappe.utils.formatdate', 'formatdate', (['from_date'], {'format_string': '"""MMM"""'}), "(from_date, format_string='MMM')\n", (1962, 1994), False, 'from frappe.utils import formatdate\n'), ((4942, 4974), 'datetime.date', 'datetime.date', (['(2013)', 'month_id', '(1)'], {}), '(2013, month_id, 1)\n', (4955, 4974), False, 'import datetime\n'), ((5353, 5375), 'frappe.utils.flt', 'flt', (['ccd.budget_amount'], {}), '(ccd.budget_amount)\n', (5356, 5375), False, 'from frappe.utils import flt\n'), ((1881, 1921), 'frappe.utils.formatdate', 'formatdate', (['to_date'], {'format_string': '"""MMM"""'}), "(to_date, format_string='MMM')\n", (1891, 1921), False, 'from frappe.utils import formatdate\n'), ((5511, 5524), 'frappe.utils.flt', 'flt', (['ad.debit'], {}), '(ad.debit)\n', (5514, 5524), False, 'from frappe.utils import flt\n'), ((5527, 5541), 'frappe.utils.flt', 'flt', (['ad.credit'], {}), '(ad.credit)\n', (5530, 
5541), False, 'from frappe.utils import flt\n'), ((1828, 1870), 'frappe.utils.formatdate', 'formatdate', (['from_date'], {'format_string': '"""MMM"""'}), "(from_date, format_string='MMM')\n", (1838, 1870), False, 'from frappe.utils import formatdate\n')]
|
import os
import sys
import speedtest
import pyfiglet
os.system("clear")
banner = pyfiglet.figlet_format("WifiSpeedTester", font="slant" )
print (banner)
print (" Author : <NAME>(rkt)")
print (" Github : https://github.com/r3k4t")
st = speedtest.Speedtest()
option = int(input('''What speed do you want to test:
1) Download Speed
2) Upload Speed
3) Ping
Your Choice: '''))
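# download()/upload() report the measured speed in bits per second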
if option == 1:
print(st.download())
elif option == 2:
print(st.upload())
elif option == 3:
st.download()
st.upload()
servername = []
st.get_servers(servername)
print(st.results.ping)
else:
print("Please enter the correct choice !")
|
[
"speedtest.Speedtest",
"os.system",
"pyfiglet.figlet_format"
] |
[((59, 77), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (68, 77), False, 'import os\n'), ((88, 143), 'pyfiglet.figlet_format', 'pyfiglet.figlet_format', (['"""WifiSpeedTester"""'], {'font': '"""slant"""'}), "('WifiSpeedTester', font='slant')\n", (110, 143), False, 'import pyfiglet\n'), ((289, 310), 'speedtest.Speedtest', 'speedtest.Speedtest', ([], {}), '()\n', (308, 310), False, 'import speedtest\n')]
|
import numpy
from sympy import Rational as frac
from sympy import pi, sqrt
from ..helpers import article, fsd, pm, untangle
from ._helpers import Enr2Scheme
citation = article(
authors=["<NAME>", "<NAME>"],
title="Approximate integration formulas for certain spherically symmetric regions",
journal="Math. Comp.",
volume="17",
year="1963",
pages="105-135",
url="https://doi.org/10.1090/S0025-5718-1963-0161473-0",
)
def stroud_secrest_1(n):
data = [(frac(1, n + 1), sqrt(frac(1, 2)) * _nsimplex(n))]
points, weights = untangle(data)
weights *= sqrt(pi) ** n
return Enr2Scheme("Stroud-Secrest I", n, weights, points, 2, citation)
def stroud_secrest_2(n):
nu = sqrt(frac(n, 2))
data = [(frac(1, 2 * n), fsd(n, (nu, 1)))]
points, weights = untangle(data)
weights *= sqrt(pi) ** n
return Enr2Scheme("Stroud-Secrest II", n, weights, points, 3, citation)
def stroud_secrest_3(n):
nu = sqrt(frac(1, 2))
data = [(frac(1, 2 ** n), pm(n, nu))]
points, weights = untangle(data)
weights *= sqrt(pi) ** n
return Enr2Scheme("Stroud-Secrest III", n, weights, points, 3, citation)
def stroud_secrest_4(n):
nu = sqrt(frac(n + 2, 2))
xi = sqrt(frac(n + 2, 4))
A = frac(2, n + 2)
B = frac(4 - n, 2 * (n + 2) ** 2)
C = frac(1, (n + 2) ** 2)
data = [(A, numpy.full((1, n), 0)), (B, fsd(n, (nu, 1))), (C, fsd(n, (xi, 2)))]
points, weights = untangle(data)
weights *= sqrt(pi) ** n
return Enr2Scheme("Stroud-Secrest IV", n, weights, points, 5, citation)
def _nsimplex(n):
# construct the regular n-simplex points with 0 center
return numpy.array(
[
[-sqrt(frac(n + 1, (n + 1 - k) * (n - k))) for k in range(i)]
+ [sqrt(frac((n + 1) * (n - i), n + 1 - i))]
+ (n - i - 1) * [0]
for i in range(n)
]
+ [[-sqrt(frac(n + 1, (n + 1 - i) * (n - i))) for i in range(n)]]
)
|
[
"numpy.full",
"sympy.sqrt",
"sympy.Rational"
] |
[((1252, 1266), 'sympy.Rational', 'frac', (['(2)', '(n + 2)'], {}), '(2, n + 2)\n', (1256, 1266), True, 'from sympy import Rational as frac\n'), ((1275, 1304), 'sympy.Rational', 'frac', (['(4 - n)', '(2 * (n + 2) ** 2)'], {}), '(4 - n, 2 * (n + 2) ** 2)\n', (1279, 1304), True, 'from sympy import Rational as frac\n'), ((1313, 1334), 'sympy.Rational', 'frac', (['(1)', '((n + 2) ** 2)'], {}), '(1, (n + 2) ** 2)\n', (1317, 1334), True, 'from sympy import Rational as frac\n'), ((588, 596), 'sympy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (592, 596), False, 'from sympy import pi, sqrt\n'), ((718, 728), 'sympy.Rational', 'frac', (['n', '(2)'], {}), '(n, 2)\n', (722, 728), True, 'from sympy import Rational as frac\n'), ((829, 837), 'sympy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (833, 837), False, 'from sympy import pi, sqrt\n'), ((960, 970), 'sympy.Rational', 'frac', (['(1)', '(2)'], {}), '(1, 2)\n', (964, 970), True, 'from sympy import Rational as frac\n'), ((1066, 1074), 'sympy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (1070, 1074), False, 'from sympy import pi, sqrt\n'), ((1198, 1212), 'sympy.Rational', 'frac', (['(n + 2)', '(2)'], {}), '(n + 2, 2)\n', (1202, 1212), True, 'from sympy import Rational as frac\n'), ((1228, 1242), 'sympy.Rational', 'frac', (['(n + 2)', '(4)'], {}), '(n + 2, 4)\n', (1232, 1242), True, 'from sympy import Rational as frac\n'), ((1472, 1480), 'sympy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (1476, 1480), False, 'from sympy import pi, sqrt\n'), ((486, 500), 'sympy.Rational', 'frac', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (490, 500), True, 'from sympy import Rational as frac\n'), ((743, 757), 'sympy.Rational', 'frac', (['(1)', '(2 * n)'], {}), '(1, 2 * n)\n', (747, 757), True, 'from sympy import Rational as frac\n'), ((985, 1000), 'sympy.Rational', 'frac', (['(1)', '(2 ** n)'], {}), '(1, 2 ** n)\n', (989, 1000), True, 'from sympy import Rational as frac\n'), ((1352, 1373), 'numpy.full', 'numpy.full', (['(1, n)', '(0)'], {}), '((1, n), 0)\n', (1362, 1373), False, 'import numpy\n'), ((507, 517), 'sympy.Rational', 'frac', (['(1)', '(2)'], {}), '(1, 2)\n', (511, 517), True, 'from sympy import Rational as frac\n'), ((1896, 1930), 'sympy.Rational', 'frac', (['(n + 1)', '((n + 1 - i) * (n - i))'], {}), '(n + 1, (n + 1 - i) * (n - i))\n', (1900, 1930), True, 'from sympy import Rational as frac\n'), ((1769, 1803), 'sympy.Rational', 'frac', (['((n + 1) * (n - i))', '(n + 1 - i)'], {}), '((n + 1) * (n - i), n + 1 - i)\n', (1773, 1803), True, 'from sympy import Rational as frac\n'), ((1694, 1728), 'sympy.Rational', 'frac', (['(n + 1)', '((n + 1 - k) * (n - k))'], {}), '(n + 1, (n + 1 - k) * (n - k))\n', (1698, 1728), True, 'from sympy import Rational as frac\n')]
|
#!/usr/bin/env python3
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of a simple nurse scheduling problem."""
# [START program]
# [START import]
from ortools.sat.python import cp_model
# [END import]
# [START solution_printer]
class NursesPartialSolutionPrinter(cp_model.CpSolverSolutionCallback):
"""Print intermediate solutions."""
def __init__(self, shifts, num_nurses, num_days, num_shifts, sols):
cp_model.CpSolverSolutionCallback.__init__(self)
self._shifts = shifts
self._num_nurses = num_nurses
self._num_days = num_days
self._num_shifts = num_shifts
self._solutions = set(sols)
self._solution_count = 0
def on_solution_callback(self):
if self._solution_count in self._solutions:
print('Solution %i' % self._solution_count)
for d in range(self._num_days):
print('Day %i' % d)
for n in range(self._num_nurses):
is_working = False
for s in range(self._num_shifts):
if self.Value(self._shifts[(n, d, s)]):
is_working = True
print(' Nurse %i works shift %i' % (n, s))
if not is_working:
print(' Nurse {} does not work'.format(n))
print()
self._solution_count += 1
def solution_count(self):
return self._solution_count
# [END solution_printer]
def main():
# Data.
# [START data]
num_nurses = 4
num_shifts = 3
num_days = 3
all_nurses = range(num_nurses)
all_shifts = range(num_shifts)
all_days = range(num_days)
# [END data]
# Creates the model.
# [START model]
model = cp_model.CpModel()
# [END model]
# Creates shift variables.
# shifts[(n, d, s)]: nurse 'n' works shift 's' on day 'd'.
# [START variables]
shifts = {}
for n in all_nurses:
for d in all_days:
for s in all_shifts:
shifts[(n, d,
s)] = model.NewBoolVar('shift_n%id%is%i' % (n, d, s))
# [END variables]
# Each shift is assigned to exactly one nurse in the schedule period.
# [START exactly_one_nurse]
for d in all_days:
for s in all_shifts:
model.Add(sum(shifts[(n, d, s)] for n in all_nurses) == 1)
# [END exactly_one_nurse]
# Each nurse works at most one shift per day.
# [START at_most_one_shift]
for n in all_nurses:
for d in all_days:
model.Add(sum(shifts[(n, d, s)] for s in all_shifts) <= 1)
# [END at_most_one_shift]
# [START assign_nurses_evenly]
# Try to distribute the shifts evenly, so that each nurse works
# min_shifts_per_nurse shifts. If this is not possible, because the total
# number of shifts is not divisible by the number of nurses, some nurses will
# be assigned one more shift.
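    # With the data above: 3 shifts * 3 days = 9 shifts for 4 nurses, so
    # min_shifts_per_nurse = 9 // 4 = 2 and max_shifts_per_nurse = 2 + 1 = 3.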
min_shifts_per_nurse = (num_shifts * num_days) // num_nurses
if num_shifts * num_days % num_nurses == 0:
max_shifts_per_nurse = min_shifts_per_nurse
else:
max_shifts_per_nurse = min_shifts_per_nurse + 1
for n in all_nurses:
num_shifts_worked = 0
for d in all_days:
for s in all_shifts:
num_shifts_worked += shifts[(n, d, s)]
model.Add(min_shifts_per_nurse <= num_shifts_worked)
model.Add(num_shifts_worked <= max_shifts_per_nurse)
# [END assign_nurses_evenly]
# Creates the solver and solve.
# [START solve]
solver = cp_model.CpSolver()
solver.parameters.linearization_level = 0
# Enumerate all solutions.
solver.parameters.enumerate_all_solutions = True
# Display the first five solutions.
a_few_solutions = range(5)
solution_printer = NursesPartialSolutionPrinter(shifts, num_nurses,
num_days, num_shifts,
a_few_solutions)
solver.Solve(model, solution_printer)
# [END solve]
# Statistics.
print()
print('Statistics')
print(' - conflicts : %i' % solver.NumConflicts())
print(' - branches : %i' % solver.NumBranches())
print(' - wall time : %f s' % solver.WallTime())
print(' - solutions found : %i' % solution_printer.solution_count())
if __name__ == '__main__':
main()
# [END program]
|
[
"ortools.sat.python.cp_model.CpSolverSolutionCallback.__init__",
"ortools.sat.python.cp_model.CpSolver",
"ortools.sat.python.cp_model.CpModel"
] |
[((2301, 2319), 'ortools.sat.python.cp_model.CpModel', 'cp_model.CpModel', ([], {}), '()\n', (2317, 2319), False, 'from ortools.sat.python import cp_model\n'), ((4108, 4127), 'ortools.sat.python.cp_model.CpSolver', 'cp_model.CpSolver', ([], {}), '()\n', (4125, 4127), False, 'from ortools.sat.python import cp_model\n'), ((964, 1012), 'ortools.sat.python.cp_model.CpSolverSolutionCallback.__init__', 'cp_model.CpSolverSolutionCallback.__init__', (['self'], {}), '(self)\n', (1006, 1012), False, 'from ortools.sat.python import cp_model\n')]
|
import os
import time
import copy
import torch
import matplotlib
import torchvision
import torch.nn as nn
import numpy as np
import torch.optim as optim
import matplotlib.pyplot as plt
from pathlib import Path
from torch.optim import lr_scheduler
from torchvision import datasets, models, transforms
import libs.dirs as dirs
from libs.utils import *
from models.trainer_class import TrainModel
def torch_imshow(gridInput, mean, std, title=None):
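    # Convert the CHW tensor grid to HWC, undo the Normalize transform and display it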
gridInput = gridInput.numpy().transpose((1,2,0))
gridInput = std*gridInput + mean
gridInput = np.clip(gridInput, 0, 1)
ax = plt.imshow(gridInput)
plt.title(title)
# plt.pause(0.01)
# plt.imsave("../images/testgrid.png", gridInput)
if __name__ == "__main__":
datasetPath = Path(dirs.dataset) / "torch/hymenoptera_data"
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
dataTransforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean, std),
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean, std),
])
}
# Dataset loaders for train and val sets
imageDatasets = {
x: datasets.ImageFolder(str(datasetPath / x), dataTransforms[x]) for x in ['train', 'val']
}
# Get an image batch of the training set
# inputs, classes = next(iter(dataloaders['train']))
# Make a grid and display it
# imgGrid = torchvision.utils.make_grid(inputs)
# torch_imshow(imgGrid, mean, std, title=[classNames[x] for x in classes])
# plt.show()
# Instantiate trainer object
trainer = TrainModel()
# device = torch.device('cuda:0')
modelFineTune = trainer.define_model_resnet18(finetune=True)
criterion = nn.CrossEntropyLoss()
# Set optimizer
optimizerFineTune = optim.SGD(modelFineTune.parameters(), lr=0.001, momentum=0.9)
# Scheduler for learning rate decay
expLrScheduler = lr_scheduler.StepLR(optimizerFineTune, step_size=7, gamma=0.1)
# Perform training
trainer.load_data(imageDatasets, num_examples_per_batch=4)
modelFineTune = trainer.train(modelFineTune, criterion, optimizerFineTune, expLrScheduler, num_epochs=25)
|
[
"matplotlib.pyplot.title",
"torch.optim.lr_scheduler.StepLR",
"torchvision.transforms.RandomHorizontalFlip",
"models.trainer_class.TrainModel",
"matplotlib.pyplot.imshow",
"torchvision.transforms.Normalize",
"torch.nn.CrossEntropyLoss",
"numpy.clip",
"pathlib.Path",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.RandomResizedCrop",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor"
] |
[((657, 681), 'numpy.clip', 'np.clip', (['gridInput', '(0)', '(1)'], {}), '(gridInput, 0, 1)\n', (664, 681), True, 'import numpy as np\n'), ((692, 713), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gridInput'], {}), '(gridInput)\n', (702, 713), True, 'import matplotlib.pyplot as plt\n'), ((718, 734), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (727, 734), True, 'import matplotlib.pyplot as plt\n'), ((2004, 2016), 'models.trainer_class.TrainModel', 'TrainModel', ([], {}), '()\n', (2014, 2016), False, 'from models.trainer_class import TrainModel\n'), ((2142, 2163), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2161, 2163), True, 'import torch.nn as nn\n'), ((2333, 2395), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizerFineTune'], {'step_size': '(7)', 'gamma': '(0.1)'}), '(optimizerFineTune, step_size=7, gamma=0.1)\n', (2352, 2395), False, 'from torch.optim import lr_scheduler\n'), ((857, 875), 'pathlib.Path', 'Path', (['dirs.dataset'], {}), '(dirs.dataset)\n', (861, 875), False, 'from pathlib import Path\n'), ((1058, 1091), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (1086, 1091), False, 'from torchvision import datasets, models, transforms\n'), ((1113, 1146), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1144, 1146), False, 'from torchvision import datasets, models, transforms\n'), ((1168, 1189), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1187, 1189), False, 'from torchvision import datasets, models, transforms\n'), ((1211, 1242), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (1231, 1242), False, 'from torchvision import datasets, models, transforms\n'), ((1312, 1334), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (1329, 1334), False, 'from torchvision import datasets, models, transforms\n'), ((1356, 1382), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1377, 1382), False, 'from torchvision import datasets, models, transforms\n'), ((1404, 1425), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1423, 1425), False, 'from torchvision import datasets, models, transforms\n'), ((1447, 1478), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (1467, 1478), False, 'from torchvision import datasets, models, transforms\n')]
|
import colorsys
import copy
import os
import time
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
from nets.frcnn import FasterRCNN
from utils.utils import DecodeBox, get_new_img_size
#--------------------------------------------#
#   To predict with your own trained model, two
#   parameters must be changed: both model_path
#   and classes_path need to be modified!
#   If a shape mismatch occurs, make sure that
#   NUM_CLASSES used during training matches the
#   model_path and classes_path settings.
#--------------------------------------------#
class FRCNN(object):
_defaults = {
"model_path" : 'model_data/voc_weights_resnet.pth',
"classes_path" : 'model_data/voc_classes.txt',
"confidence" : 0.5,
"iou" : 0.3,
"backbone" : "resnet50",
"cuda" : False,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
    #---------------------------------------------------#
    #   Initialize Faster R-CNN
    #---------------------------------------------------#
def __init__(self, **kwargs):
self.__dict__.update(self._defaults)
self.class_names = self._get_class()
self.generate()
self.mean = torch.Tensor([0, 0, 0, 0]).repeat(self.num_classes+1)[None]
self.std = torch.Tensor([0.1, 0.1, 0.2, 0.2]).repeat(self.num_classes+1)[None]
if self.cuda:
self.mean = self.mean.cuda()
self.std = self.std.cuda()
self.decodebox = DecodeBox(self.std, self.mean, self.num_classes)
    #---------------------------------------------------#
    #   Get all the class names
    #---------------------------------------------------#
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
    #---------------------------------------------------#
    #   Load the model
    #---------------------------------------------------#
def generate(self):
        #-------------------------------#
        #   Compute the total number of classes
        #-------------------------------#
self.num_classes = len(self.class_names)
        #-------------------------------#
        #   Load the model and its weights
        #-------------------------------#
self.model = FasterRCNN(self.num_classes,"predict",backbone=self.backbone).eval()
print('Loading weights into state dict...')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
state_dict = torch.load(self.model_path, map_location=device)
self.model.load_state_dict(state_dict)
if self.cuda:
# self.model = nn.DataParallel(self.model)
self.model = self.model.cuda()
print('{} model, anchors, and classes loaded.'.format(self.model_path))
        # Assign a different color to each class for drawing boxes
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
    #---------------------------------------------------#
    #   Detect objects in an image
    #---------------------------------------------------#
def detect_image(self, image):
        #-------------------------------------#
        #   Convert the image to RGB so that grayscale images can also be predicted.
        #-------------------------------------#
image = image.convert("RGB")
image_shape = np.array(np.shape(image)[0:2])
old_width, old_height = image_shape[1], image_shape[0]
old_image = copy.deepcopy(image)
        #---------------------------------------------------------#
        #   Resize the original image so that its short side is 600.
        #---------------------------------------------------------#
width,height = get_new_img_size(old_width, old_height)
image = image.resize([width,height], Image.BICUBIC)
        #-----------------------------------------------------------#
        #   Preprocess the image and normalize it.
        #-----------------------------------------------------------#
photo = np.transpose(np.array(image,dtype = np.float32)/255, (2, 0, 1))
with torch.no_grad():
images = torch.from_numpy(np.asarray([photo]))
if self.cuda:
images = images.cuda()
roi_cls_locs, roi_scores, rois, _ = self.model(images)
            #-------------------------------------------------------------#
            #   Decode the region proposals with the classifier outputs to obtain the predicted boxes.
            #-------------------------------------------------------------#
outputs = self.decodebox.forward(roi_cls_locs[0], roi_scores[0], rois, height = height, width = width, nms_iou = self.iou, score_thresh = self.confidence)
            #---------------------------------------------------------#
            #   If no objects are detected, return the original image.
            #---------------------------------------------------------#
if len(outputs)==0:
return old_image
outputs = np.array(outputs)
bbox = outputs[:,:4]
label = outputs[:, 4]
conf = outputs[:, 5]
bbox[:, 0::2] = (bbox[:, 0::2]) / width * old_width
bbox[:, 1::2] = (bbox[:, 1::2]) / height * old_height
font = ImageFont.truetype(font='model_data/simhei.ttf',size=np.floor(3e-2 * np.shape(image)[1] + 0.5).astype('int32'))
thickness = max((np.shape(old_image)[0] + np.shape(old_image)[1]) // old_width * 2, 1)
image = old_image
for i, c in enumerate(label):
predicted_class = self.class_names[int(c)]
score = conf[i]
left, top, right, bottom = bbox[i]
top = top - 5
left = left - 5
bottom = bottom + 5
right = right + 5
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(np.shape(image)[0], np.floor(bottom + 0.5).astype('int32'))
right = min(np.shape(image)[1], np.floor(right + 0.5).astype('int32'))
            # Draw the bounding box and label
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
label = label.encode('utf-8')
print(label, top, left, bottom, right)
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[int(c)])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[int(c)])
draw.text(text_origin, str(label,'UTF-8'), fill=(0, 0, 0), font=font)
del draw
return image
def get_FPS(self, image, test_interval):
        #-------------------------------------#
        #   Convert the image to RGB so that grayscale images can also be predicted.
        #-------------------------------------#
image = image.convert("RGB")
image_shape = np.array(np.shape(image)[0:2])
old_width, old_height = image_shape[1], image_shape[0]
        #---------------------------------------------------------#
        #   Resize the original image so that its short side is 600.
        #---------------------------------------------------------#
width,height = get_new_img_size(old_width, old_height)
image = image.resize([width,height], Image.BICUBIC)
        #-----------------------------------------------------------#
        #   Preprocess the image and normalize it.
        #-----------------------------------------------------------#
photo = np.transpose(np.array(image,dtype = np.float32)/255, (2, 0, 1))
with torch.no_grad():
images = torch.from_numpy(np.asarray([photo]))
if self.cuda:
images = images.cuda()
roi_cls_locs, roi_scores, rois, _ = self.model(images)
            #-------------------------------------------------------------#
            #   Decode the region proposals with the classifier outputs to obtain the predicted boxes.
            #-------------------------------------------------------------#
outputs = self.decodebox.forward(roi_cls_locs[0], roi_scores[0], rois, height = height, width = width, nms_iou = self.iou, score_thresh = self.confidence)
            #---------------------------------------------------------#
            #   If no objects are detected, return the original image.
            #---------------------------------------------------------#
if len(outputs)>0:
outputs = np.array(outputs)
bbox = outputs[:,:4]
label = outputs[:, 4]
conf = outputs[:, 5]
bbox[:, 0::2] = (bbox[:, 0::2]) / width * old_width
bbox[:, 1::2] = (bbox[:, 1::2]) / height * old_height
t1 = time.time()
for _ in range(test_interval):
with torch.no_grad():
roi_cls_locs, roi_scores, rois, _ = self.model(images)
                #-------------------------------------------------------------#
                #   Decode the region proposals with the classifier outputs to obtain the predicted boxes.
                #-------------------------------------------------------------#
outputs = self.decodebox.forward(roi_cls_locs[0], roi_scores[0], rois, height = height, width = width, nms_iou = self.iou, score_thresh = self.confidence)
                #---------------------------------------------------------#
                #   If no objects are detected, return the original image.
                #---------------------------------------------------------#
if len(outputs)>0:
outputs = np.array(outputs)
bbox = outputs[:,:4]
label = outputs[:, 4]
conf = outputs[:, 5]
bbox[:, 0::2] = (bbox[:, 0::2]) / width * old_width
bbox[:, 1::2] = (bbox[:, 1::2]) / height * old_height
t2 = time.time()
tact_time = (t2 - t1) / test_interval
return tact_time
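# --- A minimal usage sketch (not part of the original class) ---
# Assumes the default weight/class files listed in _defaults exist on disk;
# 'street.jpg' is a hypothetical test image path.
if __name__ == "__main__":
    frcnn = FRCNN()
    test_image = Image.open("street.jpg")
    result_image = frcnn.detect_image(test_image)
    result_image.show()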
|
[
"copy.deepcopy",
"colorsys.hsv_to_rgb",
"torch.load",
"numpy.asarray",
"numpy.floor",
"utils.utils.get_new_img_size",
"utils.utils.DecodeBox",
"time.time",
"numpy.shape",
"torch.Tensor",
"numpy.array",
"nets.frcnn.FasterRCNN",
"torch.cuda.is_available",
"PIL.ImageDraw.Draw",
"torch.no_grad",
"os.path.expanduser"
] |
[((1528, 1576), 'utils.utils.DecodeBox', 'DecodeBox', (['self.std', 'self.mean', 'self.num_classes'], {}), '(self.std, self.mean, self.num_classes)\n', (1537, 1576), False, 'from utils.utils import DecodeBox, get_new_img_size\n'), ((1759, 1796), 'os.path.expanduser', 'os.path.expanduser', (['self.classes_path'], {}), '(self.classes_path)\n', (1777, 1796), False, 'import os\n'), ((2611, 2659), 'torch.load', 'torch.load', (['self.model_path'], {'map_location': 'device'}), '(self.model_path, map_location=device)\n', (2621, 2659), False, 'import torch\n'), ((3752, 3772), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (3765, 3772), False, 'import copy\n'), ((3984, 4023), 'utils.utils.get_new_img_size', 'get_new_img_size', (['old_width', 'old_height'], {}), '(old_width, old_height)\n', (4000, 4023), False, 'from utils.utils import DecodeBox, get_new_img_size\n'), ((7738, 7777), 'utils.utils.get_new_img_size', 'get_new_img_size', (['old_width', 'old_height'], {}), '(old_width, old_height)\n', (7754, 7777), False, 'from utils.utils import DecodeBox, get_new_img_size\n'), ((9207, 9218), 'time.time', 'time.time', ([], {}), '()\n', (9216, 9218), False, 'import time\n'), ((10317, 10328), 'time.time', 'time.time', ([], {}), '()\n', (10326, 10328), False, 'import time\n'), ((4342, 4357), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4355, 4357), False, 'import torch\n'), ((5180, 5197), 'numpy.array', 'np.array', (['outputs'], {}), '(outputs)\n', (5188, 5197), True, 'import numpy as np\n'), ((6377, 6398), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (6391, 6398), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((8096, 8111), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8109, 8111), False, 'import torch\n'), ((2391, 2454), 'nets.frcnn.FasterRCNN', 'FasterRCNN', (['self.num_classes', '"""predict"""'], {'backbone': 'self.backbone'}), "(self.num_classes, 'predict', backbone=self.backbone)\n", (2401, 2454), False, 'from nets.frcnn import FasterRCNN\n'), ((2552, 2577), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2575, 2577), False, 'import torch\n'), ((3647, 3662), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (3655, 3662), True, 'import numpy as np\n'), ((4277, 4310), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.float32'}), '(image, dtype=np.float32)\n', (4285, 4310), True, 'import numpy as np\n'), ((4397, 4416), 'numpy.asarray', 'np.asarray', (['[photo]'], {}), '([photo])\n', (4407, 4416), True, 'import numpy as np\n'), ((6628, 6665), 'numpy.array', 'np.array', (['[left, top - label_size[1]]'], {}), '([left, top - label_size[1]])\n', (6636, 6665), True, 'import numpy as np\n'), ((6714, 6739), 'numpy.array', 'np.array', (['[left, top + 1]'], {}), '([left, top + 1])\n', (6722, 6739), True, 'import numpy as np\n'), ((7442, 7457), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (7450, 7457), True, 'import numpy as np\n'), ((8031, 8064), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.float32'}), '(image, dtype=np.float32)\n', (8039, 8064), True, 'import numpy as np\n'), ((8151, 8170), 'numpy.asarray', 'np.asarray', (['[photo]'], {}), '([photo])\n', (8161, 8170), True, 'import numpy as np\n'), ((8904, 8921), 'numpy.array', 'np.array', (['outputs'], {}), '(outputs)\n', (8912, 8921), True, 'import numpy as np\n'), ((9275, 9290), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9288, 9290), False, 'import torch\n'), ((1241, 1267), 'torch.Tensor', 'torch.Tensor', (['[0, 0, 0, 0]'], {}), 
'([0, 0, 0, 0])\n', (1253, 1267), False, 'import torch\n'), ((1320, 1354), 'torch.Tensor', 'torch.Tensor', (['[0.1, 0.1, 0.2, 0.2]'], {}), '([0.1, 0.1, 0.2, 0.2])\n', (1332, 1354), False, 'import torch\n'), ((3098, 3121), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (3117, 3121), False, 'import colorsys\n'), ((6133, 6148), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6141, 6148), True, 'import numpy as np\n'), ((6217, 6232), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6225, 6232), True, 'import numpy as np\n'), ((9998, 10015), 'numpy.array', 'np.array', (['outputs'], {}), '(outputs)\n', (10006, 10015), True, 'import numpy as np\n'), ((6007, 6026), 'numpy.floor', 'np.floor', (['(top + 0.5)'], {}), '(top + 0.5)\n', (6015, 6026), True, 'import numpy as np\n'), ((6070, 6090), 'numpy.floor', 'np.floor', (['(left + 0.5)'], {}), '(left + 0.5)\n', (6078, 6090), True, 'import numpy as np\n'), ((6153, 6175), 'numpy.floor', 'np.floor', (['(bottom + 0.5)'], {}), '(bottom + 0.5)\n', (6161, 6175), True, 'import numpy as np\n'), ((6237, 6258), 'numpy.floor', 'np.floor', (['(right + 0.5)'], {}), '(right + 0.5)\n', (6245, 6258), True, 'import numpy as np\n'), ((5583, 5602), 'numpy.shape', 'np.shape', (['old_image'], {}), '(old_image)\n', (5591, 5602), True, 'import numpy as np\n'), ((5608, 5627), 'numpy.shape', 'np.shape', (['old_image'], {}), '(old_image)\n', (5616, 5627), True, 'import numpy as np\n'), ((5514, 5529), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5522, 5529), True, 'import numpy as np\n')]
|
# Added ICP, and also demonstrated multichannel
import pprint
import nidaqmx
from nidaqmx.constants import (
Edge, TriggerType, AcquisitionType, LineGrouping, Level, TaskMode)
pp = pprint.PrettyPrinter(indent=4)
sample_rate = 1000
number_of_samples = 1000
samp_clk_terminal = ""
task = nidaqmx.Task()
task2 = nidaqmx.Task()
#task.ai_channels.add_ai_voltage_chan("Dev3/ai0")
#task.timing.cfg_samp_clk_timing(sample_rate, source=samp_clk_terminal, active_edge=Edge.RISING,samps_per_chan=number_of_samples)
##task.ai_channels.all.ai_excit_val = 0.002
##task.ai_channels.all.ai_coupling = nidaqmx.constants.Coupling.AC
#task.ai_channels[0].ai_excit_val = 0.002
#task.ai_channels[0].ai_coupling = nidaqmx.constants.Coupling.AC
#print('1 Channel 1 Sample Read: ')
#data = task.read()
#pp.pprint(data)
#data = task.read(number_of_samples_per_channel=1)
#pp.pprint(data)
#print('1 Channel N Samples Read: ')
#data = task.read(number_of_samples_per_channel=8)
#pp.pprint(data)
task2.ai_channels.add_ai_voltage_chan("Dev3/ai2:3")
task2.timing.cfg_samp_clk_timing(sample_rate, source=samp_clk_terminal, active_edge=Edge.RISING,samps_per_chan=number_of_samples)
task2.ai_channels.all.ai_excit_val = 0.002
task2.ai_channels.all.ai_coupling = nidaqmx.constants.Coupling.AC
print('N Channel 1 Sample Read: ')
data = task2.read()
pp.pprint(data)
print('N Channel N Samples Read: ')
data = task2.read(number_of_samples_per_channel=8)
pp.pprint(data)
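# Cleanup sketch (not in the original script): DAQmx tasks hold driver
# resources, so release them explicitly once the reads are done.
task.close()
task2.close()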
|
[
"nidaqmx.Task",
"pprint.PrettyPrinter"
] |
[((186, 216), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (206, 216), False, 'import pprint\n'), ((293, 307), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (305, 307), False, 'import nidaqmx\n'), ((316, 330), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (328, 330), False, 'import nidaqmx\n')]
|
import os
version_info = (0, 22, 0)
__version__ = '.'.join(map(str, version_info))
# This is to make Debian packaging easier, it ignores import
# errors of greenlet so that the packager can still at least
# access the version. Also this makes easy_install a little quieter
if os.environ.get('EVENTLET_IMPORT_VERSION_ONLY') != '1':
from eventlet import convenience
from eventlet import event
from eventlet import greenpool
from eventlet import greenthread
from eventlet import patcher
from eventlet import queue
from eventlet import semaphore
from eventlet import support
from eventlet import timeout
import greenlet
# Force monotonic library search as early as possible.
# Helpful when CPython < 3.5 on Linux blocked in `os.waitpid(-1)` before first use of hub.
# Example: gunicorn
# https://github.com/eventlet/eventlet/issues/401#issuecomment-327500352
from eventlet.support import monotonic
del monotonic
connect = convenience.connect
listen = convenience.listen
serve = convenience.serve
StopServe = convenience.StopServe
wrap_ssl = convenience.wrap_ssl
Event = event.Event
GreenPool = greenpool.GreenPool
GreenPile = greenpool.GreenPile
sleep = greenthread.sleep
spawn = greenthread.spawn
spawn_n = greenthread.spawn_n
spawn_after = greenthread.spawn_after
kill = greenthread.kill
import_patched = patcher.import_patched
monkey_patch = patcher.monkey_patch
Queue = queue.Queue
Semaphore = semaphore.Semaphore
CappedSemaphore = semaphore.CappedSemaphore
BoundedSemaphore = semaphore.BoundedSemaphore
Timeout = timeout.Timeout
with_timeout = timeout.with_timeout
wrap_is_timeout = timeout.wrap_is_timeout
is_timeout = timeout.is_timeout
getcurrent = greenlet.greenlet.getcurrent
# deprecated
TimeoutError, exc_after, call_after_global = (
support.wrap_deprecated(old, new)(fun) for old, new, fun in (
('TimeoutError', 'Timeout', Timeout),
('exc_after', 'greenthread.exc_after', greenthread.exc_after),
('call_after_global', 'greenthread.call_after_global', greenthread.call_after_global),
))
del os
|
[
"os.environ.get",
"eventlet.support.wrap_deprecated"
] |
[((279, 325), 'os.environ.get', 'os.environ.get', (['"""EVENTLET_IMPORT_VERSION_ONLY"""'], {}), "('EVENTLET_IMPORT_VERSION_ONLY')\n", (293, 325), False, 'import os\n'), ((1931, 1964), 'eventlet.support.wrap_deprecated', 'support.wrap_deprecated', (['old', 'new'], {}), '(old, new)\n', (1954, 1964), False, 'from eventlet import support\n')]
|
from copy import copy
from typing import Optional
import numpy as np
import pandas as pd
from fedot.core.log import Log, default_log
from fedot.core.repository.tasks import Task, TaskTypesEnum
NAME_CLASS_STR = "<class 'str'>"
NAME_CLASS_INT = "<class 'int'>"
NAME_CLASS_FLOAT = "<class 'float'>"
NAME_CLASS_NONE = "<class 'NoneType'>"
FEDOT_STR_NAN = 'fedot_nan'
# If the number of unique values in a feature column is less than 13 - convert the column into string type
CATEGORICAL_UNIQUE_TH = 13
MAX_CATEGORIES_TH = 30
class TableTypesCorrector:
"""
    Class for checking types in input data. Also performs conversion for columns with type conflicts
"""
def __init__(self, log: Optional[Log] = None):
# Maximum allowed unique categories in categorical table (if more - transform it into float)
self.categorical_max_classes_th = MAX_CATEGORIES_TH
# Threshold to convert numerical into categorical column
self.numerical_min_uniques = CATEGORICAL_UNIQUE_TH
self.features_columns_info = {}
self.target_columns_info = {}
# Dictionary with information about converted during fitting columns
self.features_converted_columns = {}
self.target_converted_columns = {}
# Columns to delete due to types conflicts
self.columns_to_del = []
# Column ids for transformation due to number of unique values
self.numerical_into_str = []
self.categorical_into_float = []
        # Indices of columns where string-to-numerical transformation failed
self.string_columns_transformation_failed = {}
        # Whether the target column contained non-numerical cells during conversion
self.target_converting_has_errors = False
# Lists with column types for converting calculated on source input data
self.features_types = None
self.target_types = None
self.log = log or default_log(__name__)
def convert_data_for_fit(self, data: 'InputData'):
""" If column contain several data types - perform correction procedure """
# Convert features to have an ability to insert str into float table or vice versa
data.features = data.features.astype(object)
# Determine types for each column in features and target if it is necessary
self.features_columns_info = define_column_types(data.features)
self.target_columns_info = define_column_types(data.target)
# Correct types in features table
data.features = self.features_types_converting(features=data.features)
# Remain only correct columns
data.features = self.remove_incorrect_features(data.features, self.features_converted_columns)
# And in target(s)
data.target = self.target_types_converting(target=data.target, task=data.task)
data.supplementary_data.column_types = self.prepare_column_types_info(predictors=data.features,
target=data.target,
task=data.task)
# Launch conversion float and integer features into categorical
self._into_categorical_features_transformation_for_fit(data)
self._into_numeric_features_transformation_for_fit(data)
# Save info about features and target types
self.features_types = copy(data.supplementary_data.column_types['features'])
self.target_types = copy(data.supplementary_data.column_types['target'])
self._retain_columns_info_without_types_conflicts(data)
return data
def convert_data_for_predict(self, data: 'InputData'):
""" Prepare data for predict stage. Include only column types transformation """
# Ordering is important because after removing incorrect features - indices are obsolete
data.features = data.features.astype(object)
data.features = self.remove_incorrect_features(data.features, self.features_converted_columns)
data.features = apply_type_transformation(data.features, self.features_types, self.log)
data.target = apply_type_transformation(data.target, self.target_types, self.log)
data.supplementary_data.column_types = self.prepare_column_types_info(predictors=data.features,
target=data.target,
task=data.task)
# Convert column types
self._into_categorical_features_transformation_for_predict(data)
self._into_numeric_features_transformation_for_predict(data)
self._retain_columns_info_without_types_conflicts(data)
return data
def remove_incorrect_features(self, table: np.array, converted_columns: dict):
"""
        Remove from the table those columns whose type conflicts could not be resolved
        :param table: tabular dataset based on which the new dataset will be generated
        :param converted_columns: dictionary with the actions applied to the table
"""
if not converted_columns:
return table
self.columns_to_del = [column_id for column_id, new_type_name in converted_columns.items() if
new_type_name == 'removed']
if not self.columns_to_del:
# There are no columns to delete
return table
# Remove all "bad" columns
table = np.delete(table, self.columns_to_del, 1)
return table
def features_types_converting(self, features: np.array) -> np.array:
""" Convert all elements in the data in every feature column into one type
:param features: tabular features array
"""
features_with_mixed_types = find_mixed_types_columns(self.features_columns_info)
if not features_with_mixed_types:
return features
# There are mixed-types columns in features table - convert them
for mixed_column_id in features_with_mixed_types:
column_info = self.features_columns_info[mixed_column_id]
if column_info.get('str_number') > 0:
# There are string elements in the array
mixed_column = features[:, mixed_column_id]
updated_column, new_type_name = self._convert_feature_into_one_type(mixed_column, column_info,
mixed_column_id)
# Store information about converted columns
self.features_converted_columns.update({mixed_column_id: new_type_name})
if updated_column is not None:
features[:, mixed_column_id] = updated_column
return features
def target_types_converting(self, target: np.array, task: Task) -> np.array:
""" Convert all elements in every target column into one type
:param target: tabular target array
:param task: task to solve
"""
target_with_mixed_types = find_mixed_types_columns(self.target_columns_info)
if not target_with_mixed_types:
return target
# There are mixed-types columns in features table - convert them
for mixed_column_id in target_with_mixed_types:
column_info = self.target_columns_info[mixed_column_id]
if column_info.get('str_number') > 0:
# There are string elements in the array
mixed_column = target[:, mixed_column_id]
updated_column, new_type_name = self._convert_target_into_one_type(mixed_column, column_info,
mixed_column_id, task)
# Store information about converted columns
self.target_converted_columns.update({mixed_column_id: new_type_name})
if updated_column is not None:
target[:, mixed_column_id] = updated_column
return target
def prepare_column_types_info(self, predictors: np.array, target: np.array = None,
task: Task = None) -> dict:
""" Prepare information about columns in a form of dictionary
Dictionary has two keys: 'target' and 'features'
"""
if not self.features_columns_info:
# Information about column types is empty - there is a need to launch algorithm to collect info
self.features_columns_info = define_column_types(predictors)
predictors = self.features_types_converting(features=predictors)
if not self.target_columns_info and task.task_type is not TaskTypesEnum.ts_forecasting:
self.target_columns_info = define_column_types(target)
target = self.target_types_converting(target=target, task=task)
features_types = _generate_list_with_types(self.features_columns_info, self.features_converted_columns)
self._check_columns_vs_types_number(predictors, features_types)
if target is None or task.task_type is TaskTypesEnum.ts_forecasting:
return {'features': features_types}
else:
target_types = _generate_list_with_types(self.target_columns_info, self.target_converted_columns)
self._check_columns_vs_types_number(target, target_types)
return {'features': features_types, 'target': target_types}
def _retain_columns_info_without_types_conflicts(self, data: 'InputData'):
""" Update information in supplementary info - retain info only about remained columns.
Such columns have no conflicts with types converting.
"""
if len(self.string_columns_transformation_failed) > 0:
self.log.warn(f'Columns with indices {list(self.string_columns_transformation_failed.keys())} were '
f'removed during mixed types column converting due to conflicts.')
data.features = self.remove_incorrect_features(data.features, self.string_columns_transformation_failed)
remained_column_types = []
for i, col in enumerate(data.supplementary_data.column_types['features']):
if i not in self.string_columns_transformation_failed:
remained_column_types.append(col)
data.supplementary_data.column_types['features'] = remained_column_types
def _check_columns_vs_types_number(self, table: np.array, column_types: list):
# Check if columns number correct
n_rows, n_cols = table.shape
if n_cols != len(column_types):
# There is an incorrect types calculation
self.log.warn('Columns number and types numbers do not match.')
def _convert_feature_into_one_type(self, mixed_column: np.array, column_info: dict, mixed_column_id: int):
""" Determine new type for current feature column based on the string ratio. And then convert column into it.
:param mixed_column: one-dimensional array with several data types
:param column_info: dictionary with information about types in the column
:param mixed_column_id: index of column in dataset
"""
if len(column_info['types']) == 2 and NAME_CLASS_NONE in column_info['types']:
# Column contain only one data type and nans
filtered_types = [x for x in column_info['types'] if x != NAME_CLASS_NONE]
return mixed_column, filtered_types[0]
string_objects_number = column_info['str_number']
all_elements_number = string_objects_number + column_info['int_number'] + column_info['float_number']
string_ratio = string_objects_number / all_elements_number
if string_ratio > 0.5:
suggested_type = str
else:
suggested_type = _obtain_new_column_type(column_info)
try:
mixed_column = mixed_column.astype(suggested_type)
# If there were nans in the column - paste nan
if column_info['nan_number'] > 0:
mixed_column = mixed_column.astype(object)
mixed_column[column_info['nan_ids']] = np.nan
del column_info['nan_ids']
return mixed_column, str(suggested_type)
except ValueError:
# Cannot convert string objects into int or float (for example 'a' into int)
prefix = f'Feature column with index {mixed_column_id} contains ' \
f'following data types: {column_info["types"]}.'
self.log.warn(f'{prefix} String cannot be converted into {suggested_type}. Drop column.')
return None, 'removed'
def _convert_target_into_one_type(self, mixed_column: np.array, column_info: dict, mixed_column_id: int,
task: Task) -> [np.array, str]:
""" Convert target columns into one type based on column proportions of object and task """
if task.task_type is TaskTypesEnum.classification:
# For classification labels are string if at least one element is a string
suggested_type = str
else:
suggested_type = _obtain_new_column_type(column_info)
try:
mixed_column = mixed_column.astype(suggested_type)
return mixed_column, str(suggested_type)
except ValueError:
# Cannot convert string objects into int or float (for example 'a' into int)
target_column = pd.Series(mixed_column)
converted_column = pd.to_numeric(target_column, errors='coerce')
prefix = f'Target column with index {mixed_column_id} contains ' \
f'following data types: {column_info["types"]}.'
log_message = f'{prefix} String cannot be converted into {suggested_type}. Ignore non converted values.'
self.log.debug(log_message)
self.target_converting_has_errors = True
return converted_column.values, str(suggested_type)
def _into_categorical_features_transformation_for_fit(self, data: 'InputData'):
"""
        Automatically determine categorical features: an int or float feature
        column with only a few unique values (fewer than 13) is treated as categorical.
"""
n_rows, n_cols = data.features.shape
for column_id in range(n_cols):
# For every int/float column perform check
column_type = data.supplementary_data.column_types['features'][column_id]
if 'int' in column_type or 'float' in column_type:
numerical_column = pd.Series(data.features[:, column_id])
# Calculate number of unique values except nans
unique_numbers = len(numerical_column.dropna().unique())
if 2 < unique_numbers < self.numerical_min_uniques:
# Column need to be transformed into categorical (string) one
self.numerical_into_str.append(column_id)
# Convert into string
converted_array = convert_num_column_into_string_array(numerical_column)
# Store converted column into features table
data.features[:, column_id] = converted_array
# Update information about column types (in-place)
features_types = data.supplementary_data.column_types['features']
features_types[column_id] = NAME_CLASS_STR
def _into_categorical_features_transformation_for_predict(self, data: 'InputData'):
""" Apply conversion into categorical string column for every signed column """
if not self.numerical_into_str:
# There is no transformation for current table
return data
n_rows, n_cols = data.features.shape
for column_id in range(n_cols):
if column_id in self.numerical_into_str:
numerical_column = pd.Series(data.features[:, column_id])
# Column must be converted into categorical
converted_array = convert_num_column_into_string_array(numerical_column)
data.features[:, column_id] = converted_array
# Update information about column types (in-place)
features_types = data.supplementary_data.column_types['features']
features_types[column_id] = NAME_CLASS_STR
def _into_numeric_features_transformation_for_fit(self, data: 'InputData'):
"""
Automatically determine categorical features which should be converted into float
"""
n_rows, n_cols = data.features.shape
for column_id in range(n_cols):
# For every string column perform converting if necessary
column_type = data.supplementary_data.column_types['features'][column_id]
if 'str' in column_type:
string_column = pd.Series(data.features[:, column_id])
unique_numbers = len(string_column.dropna().unique())
if unique_numbers > self.categorical_max_classes_th:
# Number of nans in the column
nans_number = string_column.isna().sum()
                    # The column is probably not truly categorical but numerical with an incorrectly inferred type
converted_column = pd.to_numeric(string_column, errors='coerce')
# Calculate applied nans
result_nans_number = converted_column.isna().sum()
failed_objects_number = result_nans_number - nans_number
non_nan_all_objects_number = n_rows - nans_number
failed_ratio = failed_objects_number / non_nan_all_objects_number
# If all objects are truly strings - all objects transform into nan
is_column_contain_numerical_objects = failed_ratio != 1
if failed_ratio < 0.5:
# The majority of objects can be converted into numerical
data.features[:, column_id] = converted_column.values
# Update information about column types (in-place)
self.categorical_into_float.append(column_id)
features_types = data.supplementary_data.column_types['features']
features_types[column_id] = NAME_CLASS_FLOAT
elif failed_ratio >= 0.5 and is_column_contain_numerical_objects:
# Probably numerical column contains '?' or 'x' as nans equivalents
# Add columns to remove list
self.string_columns_transformation_failed.update({column_id: 'removed'})
def _into_numeric_features_transformation_for_predict(self, data: 'InputData'):
""" Apply conversion into float string column for every signed column """
if not self.categorical_into_float:
# There is no transformation for current table
return data
n_rows, n_cols = data.features.shape
for column_id in range(n_cols):
if column_id in self.categorical_into_float and column_id not in self.string_columns_transformation_failed:
string_column = pd.Series(data.features[:, column_id])
# Column must be converted into float from categorical
converted_column = pd.to_numeric(string_column, errors='coerce')
data.features[:, column_id] = converted_column.values
# Update information about column types (in-place)
features_types = data.supplementary_data.column_types['features']
features_types[column_id] = NAME_CLASS_FLOAT
def define_column_types(table: np.array):
""" Prepare information about types per columns. For each column store unique
types, which column contains. If column with mixed type contain str object
additional field 'str_ids' with indices of string objects is prepared
"""
# TODO: current processing is relatively computationally expensive - probably refactor needed
def type_ignoring_nans(item):
""" Return type of element in the array. If item is np.nan - return NoneType """
current_type = type(item)
if current_type is float and np.isnan(item):
# Check is current element is nan or not (np.nan is a float type)
return type(None)
return current_type
if table is None:
return {}
n_rows, n_columns = table.shape
columns_info = {}
for column_id in range(n_columns):
current_column = table[:, column_id]
# Check every element in numpy array - it can take a long time!
column_types = list(map(type_ignoring_nans, current_column))
# Store only unique values
set_column_types = set(column_types)
# Convert types into string names
column_types_names = list(map(str, set_column_types))
if len(column_types_names) > 1:
# There are several types in one column
types_names = np.array(column_types, dtype=str)
# Calculate number of string objects in the dataset
str_number = len(np.argwhere(types_names == NAME_CLASS_STR))
int_number = len(np.argwhere(types_names == NAME_CLASS_INT))
float_number = len(np.argwhere(types_names == NAME_CLASS_FLOAT))
# Store information about nans in the target
nan_ids = np.ravel(np.argwhere(types_names == NAME_CLASS_NONE))
nan_number = len(nan_ids)
columns_info.update({column_id: {'types': column_types_names,
'str_number': str_number,
'int_number': int_number,
'float_number': float_number,
'nan_number': nan_number,
'nan_ids': nan_ids}})
else:
# There is only one type, or several types such as int and float
columns_info.update({column_id: {'types': column_types_names}})
return columns_info
def find_mixed_types_columns(columns_info: dict):
""" Search for columns with several types in them """
columns_with_mixed_types = []
for column_id, information in columns_info.items():
column_types = information['types']
if len(column_types) > 1:
columns_with_mixed_types.append(column_id)
return columns_with_mixed_types
def apply_type_transformation(table: np.array, column_types: list, log: Log):
"""
Apply transformation for columns in dataset into desired type. Perform
transformation on predict stage when column types were already determined
during fit
"""
def type_by_name(current_type_name: str):
""" Return type by it's name """
if 'int' in current_type_name:
return int
elif 'str' in current_type_name:
return str
else:
return float
if table is None:
# Occurs if for predict stage there is no target info
return None
n_rows, n_cols = table.shape
for column_id in range(n_cols):
current_column = table[:, column_id]
current_type = type_by_name(column_types[column_id])
try:
table[:, column_id] = current_column.astype(current_type)
except ValueError as ex:
log.debug(f'Cannot convert column with id {column_id} into type {current_type} due to {ex}')
message = str(ex)
if 'NaN' not in message:
# Try to convert column from string into float
unseen_label = message.split("\'")[1]
if ',' in unseen_label:
# Most likely case: '20,000' must be converted into '20.000'
err = f'Column {column_id} contains both "." and ",". Standardize it.'
raise ValueError(err)
else:
# Most likely case: ['a', '1.5'] -> [np.nan, 1.5]
label_ids = np.ravel(np.argwhere(current_column == unseen_label))
current_column[label_ids] = np.nan
table[:, column_id] = current_column.astype(float)
return table
def convert_num_column_into_string_array(numerical_column: pd.Series) -> np.array:
""" Convert pandas column into numpy one-dimensional array """
# Convert into string
converted_column = numerical_column.astype(str)
converted_array = converted_column.values
# If there are nans - insert them
nan_ids = np.ravel(np.argwhere(converted_array == 'nan'))
if len(nan_ids) > 0:
converted_array = converted_array.astype(object)
converted_array[nan_ids] = np.nan
return converted_array
def _obtain_new_column_type(column_info):
""" Suggest in or float type based on the presence of nan and float values """
if column_info['float_number'] > 0 or column_info['nan_number'] > 0:
# Even if one of types are float - all elements should be converted into float
return float
else:
# It is available to convert numerical into integer type
return int
def _generate_list_with_types(columns_types_info: dict, converted_columns: dict) -> list:
""" Create list with types for all remained columns
:param columns_types_info: dictionary with initial column types
:param converted_columns: dictionary with transformed column types
"""
updated_column_types = []
for column_id, column_info in columns_types_info.items():
column_types = column_info['types']
if len(column_types) == 1:
# Column initially contain only one type
updated_column_types.append(column_types[0])
elif len(column_types) == 2 and NAME_CLASS_NONE in column_types:
# Column with one type and nans
filtered_types = [x for x in column_types if x != NAME_CLASS_NONE]
updated_column_types.append(filtered_types[0])
else:
if any('str' in column_type_name for column_type_name in column_types):
# Mixed-types column with string
new_column_type = converted_columns[column_id]
if new_column_type != 'removed':
updated_column_types.append(new_column_type)
else:
# Mixed-types with float and integer
updated_column_types.append(NAME_CLASS_FLOAT)
return updated_column_types
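# --- A minimal usage sketch (not part of the original module) ---
# Runs define_column_types on a small object-dtype table with mixed types;
# the per-column statistics it reports are what TableTypesCorrector relies on.
if __name__ == '__main__':
    demo_table = np.array([[1, 'a'], [2.5, 'b'], [np.nan, 3]], dtype=object)
    print(define_column_types(demo_table))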
|
[
"copy.copy",
"numpy.isnan",
"numpy.array",
"pandas.Series",
"numpy.argwhere",
"fedot.core.log.default_log",
"numpy.delete",
"pandas.to_numeric"
] |
[((3397, 3451), 'copy.copy', 'copy', (["data.supplementary_data.column_types['features']"], {}), "(data.supplementary_data.column_types['features'])\n", (3401, 3451), False, 'from copy import copy\n'), ((3480, 3532), 'copy.copy', 'copy', (["data.supplementary_data.column_types['target']"], {}), "(data.supplementary_data.column_types['target'])\n", (3484, 3532), False, 'from copy import copy\n'), ((5482, 5522), 'numpy.delete', 'np.delete', (['table', 'self.columns_to_del', '(1)'], {}), '(table, self.columns_to_del, 1)\n', (5491, 5522), True, 'import numpy as np\n'), ((24740, 24777), 'numpy.argwhere', 'np.argwhere', (["(converted_array == 'nan')"], {}), "(converted_array == 'nan')\n", (24751, 24777), True, 'import numpy as np\n'), ((1903, 1924), 'fedot.core.log.default_log', 'default_log', (['__name__'], {}), '(__name__)\n', (1914, 1924), False, 'from fedot.core.log import Log, default_log\n'), ((20357, 20371), 'numpy.isnan', 'np.isnan', (['item'], {}), '(item)\n', (20365, 20371), True, 'import numpy as np\n'), ((21139, 21172), 'numpy.array', 'np.array', (['column_types'], {'dtype': 'str'}), '(column_types, dtype=str)\n', (21147, 21172), True, 'import numpy as np\n'), ((13500, 13523), 'pandas.Series', 'pd.Series', (['mixed_column'], {}), '(mixed_column)\n', (13509, 13523), True, 'import pandas as pd\n'), ((13555, 13600), 'pandas.to_numeric', 'pd.to_numeric', (['target_column'], {'errors': '"""coerce"""'}), "(target_column, errors='coerce')\n", (13568, 13600), True, 'import pandas as pd\n'), ((14613, 14651), 'pandas.Series', 'pd.Series', (['data.features[:, column_id]'], {}), '(data.features[:, column_id])\n', (14622, 14651), True, 'import pandas as pd\n'), ((15966, 16004), 'pandas.Series', 'pd.Series', (['data.features[:, column_id]'], {}), '(data.features[:, column_id])\n', (15975, 16004), True, 'import pandas as pd\n'), ((16930, 16968), 'pandas.Series', 'pd.Series', (['data.features[:, column_id]'], {}), '(data.features[:, column_id])\n', (16939, 16968), True, 'import pandas as pd\n'), ((19303, 19341), 'pandas.Series', 'pd.Series', (['data.features[:, column_id]'], {}), '(data.features[:, column_id])\n', (19312, 19341), True, 'import pandas as pd\n'), ((19449, 19494), 'pandas.to_numeric', 'pd.to_numeric', (['string_column'], {'errors': '"""coerce"""'}), "(string_column, errors='coerce')\n", (19462, 19494), True, 'import pandas as pd\n'), ((21266, 21308), 'numpy.argwhere', 'np.argwhere', (['(types_names == NAME_CLASS_STR)'], {}), '(types_names == NAME_CLASS_STR)\n', (21277, 21308), True, 'import numpy as np\n'), ((21339, 21381), 'numpy.argwhere', 'np.argwhere', (['(types_names == NAME_CLASS_INT)'], {}), '(types_names == NAME_CLASS_INT)\n', (21350, 21381), True, 'import numpy as np\n'), ((21414, 21458), 'numpy.argwhere', 'np.argwhere', (['(types_names == NAME_CLASS_FLOAT)'], {}), '(types_names == NAME_CLASS_FLOAT)\n', (21425, 21458), True, 'import numpy as np\n'), ((21549, 21592), 'numpy.argwhere', 'np.argwhere', (['(types_names == NAME_CLASS_NONE)'], {}), '(types_names == NAME_CLASS_NONE)\n', (21560, 21592), True, 'import numpy as np\n'), ((17375, 17420), 'pandas.to_numeric', 'pd.to_numeric', (['string_column'], {'errors': '"""coerce"""'}), "(string_column, errors='coerce')\n", (17388, 17420), True, 'import pandas as pd\n'), ((24213, 24256), 'numpy.argwhere', 'np.argwhere', (['(current_column == unseen_label)'], {}), '(current_column == unseen_label)\n', (24224, 24256), True, 'import numpy as np\n')]
|
"""rv_bis_corr.
Author: <NAME>
Calculate and plot RV vs BIS correlation
"""
import numpy as np
import statsmodels.api as sm
from scipy.stats import pearsonr
import scipy.stats as st
import matplotlib.pyplot as plt
def rv_bis_corr(data, confidence=0.05, name='last'):
"""Calculate RV vs BIS correlation and plot it.
Parameters
----------
data : dict
        A dictionary containing the datasets. Each dataset must be an array
        of shape (m, 5) with columns in the following order: t, x, xerr, y, yerr.
confidence : float
The confidence level.
name : str, optional
Target name for saving the plot.
"""
# Linear Model fitting
tlow = np.inf
tup = -np.inf
x = np.array([])
y = np.array([])
for key in data.keys():
tl = data[key][:, 0].min()
tu = data[key][:, 0].max()
if tl < tlow:
tlow = tl
if tu > tup:
tup = tu
x = np.concatenate((x, data[key][:, 1]))
y = np.concatenate((y, data[key][:, 3]))
r, p_val = pearsonr(x, y)
X = sm.add_constant(x)
model = sm.OLS(y, X)
fitted = model.fit()
error_kwargs = {'lw': .75, 'zorder': 0}
# Confidence interval calculation
y_hat = fitted.predict(X)
y_err = y - y_hat
x_mean = X.T[1].mean()
n = len(x)
dof = n - fitted.df_model - 1 # Degrees of freedom
# 2 tailed t-stat calculation
t = st.t.ppf(1 - confidence / 2, df=dof)
s_err = np.sum(np.power(y_err, 2))
markers = ['o', 'v', '^', '>', '<', '8', 's', 'p', 'H', 'D', '*', 'd']
f, ax = plt.subplots(figsize=(20, 10))
ims = []
for i, key in enumerate(data.keys()):
x = data[key][:, 1]
y = data[key][:, 3]
xerr = data[key][:, 2]
yerr = data[key][:, 4]
ti = data[key][:, 0]
im = ax.scatter(
x, y, marker=markers[i], edgecolors='k', c=ti, cmap='cool_r', s=180
)
ims.append(im)
ax.errorbar(
x, y, xerr=xerr, yerr=yerr, marker=None,
linestyle='', ecolor='k', **error_kwargs
)
for im in ims:
im.set_clim(tlow, tup)
xmin, xmax = ax.get_xlim()
x_pred = np.linspace(xmin, xmax, 1000)
x_pred2 = sm.add_constant(x_pred)
y_pred = fitted.predict(x_pred2)
conf = t * np.sqrt((s_err / (n - 2)) *
(1. / n + (np.power((x_pred - x_mean), 2) /
((np.sum(np.power(x_pred, 2))) - n *
(np.power(x_mean, 2))))))
upper = y_pred + abs(conf)
lower = y_pred - abs(conf)
cb = f.colorbar(ims[-1], pad=.005)
lab = 'Pearson r: {:.3f}'.format(r)
ax.plot(x_pred, y_pred, '-', color='midnightblue', linewidth=2, label=lab)
ax.fill_between(x_pred, lower, upper, color='#888888', alpha=0.4)
ax.set_xlim(xmin, xmax)
ax.set_xlabel('RV (km s$^{-1}$)', fontsize=30)
ax.set_ylabel('Bisector Velocity Span (km s$^{-1}$)', fontsize=30)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(28)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(28)
cb.ax.tick_params(labelsize=28)
cb.set_label('JD - 2450000', rotation=270, labelpad=25, fontsize=30)
fname = '{}_bisector_rv.pdf'.format(name)
plt.legend(loc=0, prop={'size': 28})
plt.savefig(fname, bbox_inches='tight')
return fitted
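# --- A minimal usage sketch (not part of the original module) ---
# Builds a synthetic dataset; each entry is an (m, 5) array with columns
# t, x, xerr, y, yerr as consumed by rv_bis_corr above.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    t = np.linspace(3000, 4000, 30)
    rv = rng.normal(0.0, 1.0, 30)
    bis = 0.3 * rv + rng.normal(0.0, 0.2, 30)
    errs = np.full(30, 0.1)
    demo = {'inst1': np.column_stack([t, rv, errs, bis, errs])}
    rv_bis_corr(demo, confidence=0.05, name='demo')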
|
[
"numpy.concatenate",
"statsmodels.api.OLS",
"numpy.power",
"matplotlib.pyplot.legend",
"scipy.stats.pearsonr",
"numpy.array",
"numpy.linspace",
"statsmodels.api.add_constant",
"scipy.stats.t.ppf",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((716, 728), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (724, 728), True, 'import numpy as np\n'), ((737, 749), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (745, 749), True, 'import numpy as np\n'), ((1048, 1062), 'scipy.stats.pearsonr', 'pearsonr', (['x', 'y'], {}), '(x, y)\n', (1056, 1062), False, 'from scipy.stats import pearsonr\n'), ((1072, 1090), 'statsmodels.api.add_constant', 'sm.add_constant', (['x'], {}), '(x)\n', (1087, 1090), True, 'import statsmodels.api as sm\n'), ((1103, 1115), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (1109, 1115), True, 'import statsmodels.api as sm\n'), ((1418, 1454), 'scipy.stats.t.ppf', 'st.t.ppf', (['(1 - confidence / 2)'], {'df': 'dof'}), '(1 - confidence / 2, df=dof)\n', (1426, 1454), True, 'import scipy.stats as st\n'), ((1583, 1613), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (1595, 1613), True, 'import matplotlib.pyplot as plt\n'), ((2187, 2216), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(1000)'], {}), '(xmin, xmax, 1000)\n', (2198, 2216), True, 'import numpy as np\n'), ((2231, 2254), 'statsmodels.api.add_constant', 'sm.add_constant', (['x_pred'], {}), '(x_pred)\n', (2246, 2254), True, 'import statsmodels.api as sm\n'), ((3295, 3331), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)', 'prop': "{'size': 28}"}), "(loc=0, prop={'size': 28})\n", (3305, 3331), True, 'import matplotlib.pyplot as plt\n'), ((3336, 3375), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'bbox_inches': '"""tight"""'}), "(fname, bbox_inches='tight')\n", (3347, 3375), True, 'import matplotlib.pyplot as plt\n'), ((946, 982), 'numpy.concatenate', 'np.concatenate', (['(x, data[key][:, 1])'], {}), '((x, data[key][:, 1]))\n', (960, 982), True, 'import numpy as np\n'), ((995, 1031), 'numpy.concatenate', 'np.concatenate', (['(y, data[key][:, 3])'], {}), '((y, data[key][:, 3]))\n', (1009, 1031), True, 'import numpy as np\n'), ((1474, 1492), 'numpy.power', 'np.power', (['y_err', '(2)'], {}), '(y_err, 2)\n', (1482, 1492), True, 'import numpy as np\n'), ((2370, 2398), 'numpy.power', 'np.power', (['(x_pred - x_mean)', '(2)'], {}), '(x_pred - x_mean, 2)\n', (2378, 2398), True, 'import numpy as np\n'), ((2446, 2465), 'numpy.power', 'np.power', (['x_pred', '(2)'], {}), '(x_pred, 2)\n', (2454, 2465), True, 'import numpy as np\n'), ((2510, 2529), 'numpy.power', 'np.power', (['x_mean', '(2)'], {}), '(x_mean, 2)\n', (2518, 2529), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import logging
from .input_validation import validate_cfg
from .run import run_instance
logger = logging.getLogger(__name__)
import multiprocessing as mp
"""Main module."""
def create_dicom(cfg, pools=-1):
"""
Main function for creating DICOM files
:param cfg: dictionary containing all required variables
:param pools: how many processors to use
    :return: None
"""
logger.debug('Beginning validation')
validate_cfg(cfg)
logger.info('All inputs are valid')
number_of_levels = int(cfg.get('General').get('NumberOfLevels'))
if pools < 0:
pool = mp.Pool(mp.cpu_count())
logger.debug(f'Using {mp.cpu_count()} CPUs')
else:
pool = mp.Pool(pools)
logger.debug(f'Using {pools} CPUs')
results = pool.starmap(run_instance, [(i, cfg) for i in reversed(range(number_of_levels))])
pool.close()
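# A usage sketch (not part of the original module). The full cfg schema is
# enforced by validate_cfg() and is not shown here; only the
# 'General'/'NumberOfLevels' keys are known from this file, so the example
# stays commented out.
# example_cfg = {'General': {'NumberOfLevels': 4}}  # hypothetical, incomplete
# create_dicom(example_cfg, pools=2)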
|
[
"multiprocessing.cpu_count",
"logging.getLogger",
"multiprocessing.Pool"
] |
[((123, 150), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (140, 150), False, 'import logging\n'), ((723, 737), 'multiprocessing.Pool', 'mp.Pool', (['pools'], {}), '(pools)\n', (730, 737), True, 'import multiprocessing as mp\n'), ((629, 643), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (641, 643), True, 'import multiprocessing as mp\n'), ((675, 689), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (687, 689), True, 'import multiprocessing as mp\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import os
import sys
import json
import argparse
import datetime
import subprocess
# In[ ]:
CONFIG = '/mnt/data/script/config.json'
DUMP = 'pg_dump -O -f {0}'
RESTORE = 'psql -f {0}'
GZIP = 'gzip -f {0}'
GUNZIP = 'gzip -df {0}'
COPY = 'cp -f {0} {1}'
# In[ ]:
def __build_path(param: dict) -> str:
timenow = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
return os.path.join(param['folder'], param['name'].format(timenow))
# In[ ]:
def __list_backups(param: dict) -> list:
backups = list()
entries = os.listdir(param['folder'])
entries.sort()
for file in reversed(entries):
if file.endswith('.gz'):
backups.append(os.path.join(param['folder'], file))
return backups
# In[ ]:
def main():
    result = 1
parser = argparse.ArgumentParser(description='Utility to dump/restore postgres database')
parser.add_argument('--dump', action='store_true', help='Dumps the database')
parser.add_argument('--restore', action='store_true', help='Restores the database')
arguments = parser.parse_args()
with open(CONFIG, 'r') as f:
config = json.load(f)
param = config['backup']
if arguments.dump:
path = __build_path(param)
subprocess.run(DUMP.format(path).split(), check=True)
subprocess.run(GZIP.format(path).split(), check=True)
backups = __list_backups(param)
history = param['history']
if len(backups) > history:
delete = backups[history:]
for file in delete: os.remove(file)
result = 0
elif arguments.restore:
backups = __list_backups(param)
if len(backups) > 0:
for item in backups:
try:
subprocess.run(COPY.format(item, '/tmp/tmp.sql.gz').split(), check=True)
subprocess.run(GUNZIP.format('/tmp/tmp.sql.gz').split(), check=True)
subprocess.run(RESTORE.format('/tmp/tmp.sql').split(), check=True)
result = 0
break
except: pass
else:
parser.print_help()
return result
# In[ ]:
if __name__ == '__main__':
sys.exit(main())
|
[
"os.remove",
"json.load",
"argparse.ArgumentParser",
"datetime.datetime.now",
"os.path.join",
"os.listdir"
] |
[((581, 608), 'os.listdir', 'os.listdir', (["param['folder']"], {}), "(param['folder'])\n", (591, 608), False, 'import os\n'), ((833, 918), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Utility to dump/restore postgres database"""'}), "(description='Utility to dump/restore postgres database'\n )\n", (856, 918), False, 'import argparse\n'), ((1175, 1187), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1184, 1187), False, 'import json\n'), ((370, 393), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (391, 393), False, 'import datetime\n'), ((723, 758), 'os.path.join', 'os.path.join', (["param['folder']", 'file'], {}), "(param['folder'], file)\n", (735, 758), False, 'import os\n'), ((1593, 1608), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (1602, 1608), False, 'import os\n')]
|
import setuptools
with open("readME.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="sortdict",
version="0.0.1",
author="<NAME>",
author_email="<EMAIL>",
description="This library is to sort dictionary list given to corresponding keys",
long_description="This packages sorts dictionary in efficient and fastest way",
long_description_content_type="text/markdown",
url="https://github.com/ANSHUL217",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
[
"setuptools.find_packages"
] |
[((472, 498), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (496, 498), False, 'import setuptools\n')]
|
"""Test utilities."""
import pickle
import ibis
import ibis.util as util
def assert_equal(left, right):
"""Assert that two ibis objects are equal."""
if util.all_of([left, right], ibis.Schema):
assert left.equals(right), 'Comparing schemas: \n{!r} !=\n{!r}'.format(
left, right
)
else:
assert left.equals(right), 'Objects unequal: {}\nvs\n{}'.format(
repr(left), repr(right)
)
def assert_pickle_roundtrip(obj):
"""Assert that an ibis object remains the
same after pickling and unpickling."""
loaded = pickle.loads(pickle.dumps(obj))
if hasattr(obj, "equals"):
assert obj.equals(loaded)
else:
assert obj == loaded
|
[
"ibis.util.all_of",
"pickle.dumps"
] |
[((166, 205), 'ibis.util.all_of', 'util.all_of', (['[left, right]', 'ibis.Schema'], {}), '([left, right], ibis.Schema)\n', (177, 205), True, 'import ibis.util as util\n'), ((601, 618), 'pickle.dumps', 'pickle.dumps', (['obj'], {}), '(obj)\n', (613, 618), False, 'import pickle\n')]
|
import torch
import functools
from torch.optim import Adam
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torchvision.datasets import MNIST
import tqdm
import numpy as np
from .model import ScoreNet
# @title Set up the SDE
device = None
def marginal_prob_std(t, sigma):
"""Compute the mean and standard deviation of $p_{0t}(x(t) | x(0))$.
Args:
t: A vector of time steps.
sigma: The $\sigma$ in our SDE.
Returns:
The standard deviation.
"""
t = torch.tensor(t, device=device)
return torch.sqrt((sigma ** (2 * t) - 1.) / 2. / np.log(sigma))
def diffusion_coeff(t, sigma):
"""Compute the diffusion coefficient of our SDE.
Args:
t: A vector of time steps.
sigma: The $\sigma$ in our SDE.
Returns:
The vector of diffusion coefficients.
"""
return torch.tensor(sigma ** t, device=device)
sigma = 25.0 # @param {'type':'number'}
marginal_prob_std_fn = functools.partial(marginal_prob_std, sigma=sigma)
diffusion_coeff_fn = functools.partial(diffusion_coeff, sigma=sigma)
#@title Define the loss function (double click to expand or collapse)
def loss_fn(model, x, marginal_prob_std, eps=1e-5):
"""The loss function for training score-based generative models.
Args:
model: A PyTorch model instance that represents a
time-dependent score-based model.
x: A mini-batch of training data.
marginal_prob_std: A function that gives the standard deviation of
the perturbation kernel.
eps: A tolerance value for numerical stability.
"""
random_t = torch.rand(x.shape[0], device=x.device) * (1. - eps) + eps
z = torch.randn_like(x)
std = marginal_prob_std(random_t)
perturbed_x = x + z * std[:, None, None, None]
score = model(perturbed_x, random_t)
loss = torch.mean(torch.sum((score * std[:, None, None, None] + z)**2, dim=(1,2,3)))
return loss
#@title Training (double click to expand or collapse)
def train_sde(*, data_loader, device_, n_epochs = 50, lr = 1e-4):
device = device_
score_model = torch.nn.DataParallel(ScoreNet(marginal_prob_std=marginal_prob_std_fn))
score_model = score_model.to(device)
optimizer = Adam(score_model.parameters(), lr=lr)
tqdm_epoch = tqdm.trange(n_epochs)
for epoch in tqdm_epoch:
avg_loss = 0.
num_items = 0
for x, y in data_loader:
x = x.to(device)
loss = loss_fn(score_model, x, marginal_prob_std_fn)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss += loss.item() * x.shape[0]
num_items += x.shape[0]
# Print the averaged training loss so far.
tqdm_epoch.set_description('Average Loss: {:5f}'.format(avg_loss / num_items))
# Update the checkpoint after each epoch of training.
torch.save(score_model.state_dict(), 'ckpt.pth')
return score_model
# @title Define the Euler-Maruyama sampler (double click to expand or collapse)
## The number of sampling steps.
num_steps = 500 # @param {'type':'integer'}
def Euler_Maruyama_sampler(score_model,
marginal_prob_std=marginal_prob_std_fn,
diffusion_coeff=diffusion_coeff_fn,
batch_size=64,
num_steps=num_steps,
device='cuda',
eps=1e-3):
"""Generate samples from score-based models with the Euler-Maruyama solver.
Args:
score_model: A PyTorch model that represents the time-dependent score-based model.
marginal_prob_std: A function that gives the standard deviation of
the perturbation kernel.
diffusion_coeff: A function that gives the diffusion coefficient of the SDE.
    batch_size: The number of samples to generate by calling this function once.
num_steps: The number of sampling steps.
Equivalent to the number of discretized time steps.
device: 'cuda' for running on GPUs, and 'cpu' for running on CPUs.
eps: The smallest time step for numerical stability.
Returns:
Samples.
"""
t = torch.ones(batch_size, device=device)
init_x = torch.randn(batch_size, 1, 28, 28, device=device) \
* marginal_prob_std(t)[:, None, None, None]
time_steps = torch.linspace(1., eps, num_steps, device=device)
step_size = time_steps[0] - time_steps[1]
x = init_x
with torch.no_grad():
for time_step in tqdm.tqdm(time_steps):
batch_time_step = torch.ones(batch_size, device=device) * time_step
g = diffusion_coeff(batch_time_step)
mean_x = x + (g ** 2)[:, None, None, None] * score_model(x, batch_time_step) * step_size
x = mean_x + torch.sqrt(step_size) * g[:, None, None, None] * torch.randn_like(x)
# Do not include any noise in the last sampling step.
return mean_x
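# Illustrative usage sketch (an assumption, not part of the original script): train on MNIST
# and then draw samples with the Euler-Maruyama solver; `mnist_loader` is a hypothetical
# DataLoader over MNIST images scaled to [0, 1] by ToTensor().
#   mnist_loader = DataLoader(MNIST('.', train=True, download=True,
#                                   transform=transforms.ToTensor()), batch_size=32)
#   trained_model = train_sde(data_loader=mnist_loader, device_='cuda')
#   samples = Euler_Maruyama_sampler(trained_model, batch_size=16, device='cuda')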
|
[
"functools.partial",
"torch.ones",
"tqdm.tqdm",
"numpy.log",
"torch.randn_like",
"tqdm.trange",
"torch.sqrt",
"torch.randn",
"torch.rand",
"torch.linspace",
"torch.no_grad",
"torch.sum",
"torch.tensor"
] |
[((981, 1030), 'functools.partial', 'functools.partial', (['marginal_prob_std'], {'sigma': 'sigma'}), '(marginal_prob_std, sigma=sigma)\n', (998, 1030), False, 'import functools\n'), ((1052, 1099), 'functools.partial', 'functools.partial', (['diffusion_coeff'], {'sigma': 'sigma'}), '(diffusion_coeff, sigma=sigma)\n', (1069, 1099), False, 'import functools\n'), ((531, 561), 'torch.tensor', 'torch.tensor', (['t'], {'device': 'device'}), '(t, device=device)\n', (543, 561), False, 'import torch\n'), ((875, 914), 'torch.tensor', 'torch.tensor', (['(sigma ** t)'], {'device': 'device'}), '(sigma ** t, device=device)\n', (887, 914), False, 'import torch\n'), ((1671, 1690), 'torch.randn_like', 'torch.randn_like', (['x'], {}), '(x)\n', (1687, 1690), False, 'import torch\n'), ((2263, 2284), 'tqdm.trange', 'tqdm.trange', (['n_epochs'], {}), '(n_epochs)\n', (2274, 2284), False, 'import tqdm\n'), ((4180, 4217), 'torch.ones', 'torch.ones', (['batch_size'], {'device': 'device'}), '(batch_size, device=device)\n', (4190, 4217), False, 'import torch\n'), ((4357, 4407), 'torch.linspace', 'torch.linspace', (['(1.0)', 'eps', 'num_steps'], {'device': 'device'}), '(1.0, eps, num_steps, device=device)\n', (4371, 4407), False, 'import torch\n'), ((1835, 1904), 'torch.sum', 'torch.sum', (['((score * std[:, None, None, None] + z) ** 2)'], {'dim': '(1, 2, 3)'}), '((score * std[:, None, None, None] + z) ** 2, dim=(1, 2, 3))\n', (1844, 1904), False, 'import torch\n'), ((4231, 4280), 'torch.randn', 'torch.randn', (['batch_size', '(1)', '(28)', '(28)'], {'device': 'device'}), '(batch_size, 1, 28, 28, device=device)\n', (4242, 4280), False, 'import torch\n'), ((4477, 4492), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4490, 4492), False, 'import torch\n'), ((4519, 4540), 'tqdm.tqdm', 'tqdm.tqdm', (['time_steps'], {}), '(time_steps)\n', (4528, 4540), False, 'import tqdm\n'), ((615, 628), 'numpy.log', 'np.log', (['sigma'], {}), '(sigma)\n', (621, 628), True, 'import numpy as np\n'), ((1606, 1645), 'torch.rand', 'torch.rand', (['x.shape[0]'], {'device': 'x.device'}), '(x.shape[0], device=x.device)\n', (1616, 1645), False, 'import torch\n'), ((4572, 4609), 'torch.ones', 'torch.ones', (['batch_size'], {'device': 'device'}), '(batch_size, device=device)\n', (4582, 4609), False, 'import torch\n'), ((4846, 4865), 'torch.randn_like', 'torch.randn_like', (['x'], {}), '(x)\n', (4862, 4865), False, 'import torch\n'), ((4797, 4818), 'torch.sqrt', 'torch.sqrt', (['step_size'], {}), '(step_size)\n', (4807, 4818), False, 'import torch\n')]
|
from testlauncher import log_run
def setup_package():
log_run("Inside testlauncher.grouping.setup_package")
def teardown_package():
log_run("Inside testlauncher.grouping.teardown_package")
|
[
"testlauncher.log_run"
] |
[((59, 112), 'testlauncher.log_run', 'log_run', (['"""Inside testlauncher.grouping.setup_package"""'], {}), "('Inside testlauncher.grouping.setup_package')\n", (66, 112), False, 'from testlauncher import log_run\n'), ((142, 198), 'testlauncher.log_run', 'log_run', (['"""Inside testlauncher.grouping.teardown_package"""'], {}), "('Inside testlauncher.grouping.teardown_package')\n", (149, 198), False, 'from testlauncher import log_run\n')]
|
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import jax
from jax import random
from jax.nn import initializers
import jax.numpy as jnp
import numpy as onp
from typing import Any, Tuple
from flax import linen as nn
from flax.core import Scope
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
# Require JAX omnistaging mode.
jax.config.enable_omnistaging()
class Dummy(nn.Module):
@nn.compact
def __call__(self):
self.param('foo', lambda rng: 1)
class ModuleTopLevelTest(absltest.TestCase):
pass
# def test_toplevel_immutable(self):
# d = Dummy(parent=None)
# with self.assertRaisesRegex(BaseException, "orphaned module"):
# d()
# def test_toplevel_initialized_requires_rng(self):
# with self.assertRaisesRegex(BaseException, "missing 1 required.*rngs"):
# d = Dummy(parent=None).initialized()
# def test_toplevel_initialized_with_rng(self):
# d = Dummy(parent=None).initialized(rngs={'params': random.PRNGKey(0)})
# self.assertEqual(d.variables.param.foo, 1)
# def test_toplevel_initialized_frozen(self):
# d = Dummy(parent=None).initialized(rngs={'params': random.PRNGKey(0)})
# with self.assertRaisesRegex(BaseException, "Can't set value"):
# d.variables.param.foo = 2
# def test_toplevel_initialized_has_new_scope(self):
# d = Dummy(parent=None)
# # initializing should make a copy and not have any effect
# # on `d` itself.
# d_initialized = d.initialized(rngs={'params': random.PRNGKey(0)})
# # ... make sure that indeed `d` has no scope.
# self.assertIsNone(d.scope)
# def test_can_only_call_initialized_once(self):
# d = Dummy(parent=None)
# d = d.initialized(rngs={'params': random.PRNGKey(0)})
# with self.assertRaises(BaseException):
# d.initialized(rngs={'params': random.PRNGKey(0)})
if __name__ == '__main__':
absltest.main()
|
[
"jax.config.parse_flags_with_absl",
"absl.testing.absltest.main",
"jax.config.enable_omnistaging"
] |
[((865, 899), 'jax.config.parse_flags_with_absl', 'jax.config.parse_flags_with_absl', ([], {}), '()\n', (897, 899), False, 'import jax\n'), ((932, 963), 'jax.config.enable_omnistaging', 'jax.config.enable_omnistaging', ([], {}), '()\n', (961, 963), False, 'import jax\n'), ((2452, 2467), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (2465, 2467), False, 'from absl.testing import absltest\n')]
|
import boto3
import os
os.environ['AWS_SHARED_CREDENTIALS_FILE'] = './AWS.txt'
s3 = boto3.Session(profile_name='default').client('s3')
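# Note: with range(1, 16) and Key='model-oof-' + str(i + 1), the loop below fetches
# model-oof-2.h5 through model-oof-16.h5 into /wdata.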
for i in range(1, 16):
s3.download_file(Bucket='acmilannesta', Key='model-oof-'+str(i+1)+'.h5', Filename='/wdata/model-oof-'+str(i+1)+'.h5')
|
[
"boto3.Session"
] |
[((89, 126), 'boto3.Session', 'boto3.Session', ([], {'profile_name': '"""default"""'}), "(profile_name='default')\n", (102, 126), False, 'import boto3\n')]
|
# coding: utf-8
import gzip
import io
import six
import mock
import cherrypy
from cherrypy._cpcompat import IncompleteRead, ntob, ntou
europoundUnicode = ntou('£', encoding='utf-8')
sing = ntou("毛泽东: Sing, Little Birdie?", encoding='utf-8')
sing8 = sing.encode('utf-8')
sing16 = sing.encode('utf-16')
from cherrypy.test import helper
class EncodingTests(helper.CPWebCase):
def setup_server():
class Root:
@cherrypy.expose
def index(self, param):
assert param == europoundUnicode, "%r != %r" % (
param, europoundUnicode)
yield europoundUnicode
@cherrypy.expose
def mao_zedong(self):
return sing
@cherrypy.expose
@cherrypy.config(**{'tools.encode.encoding': 'utf-8'})
def utf8(self):
return sing8
@cherrypy.expose
def cookies_and_headers(self):
# if the headers have non-ascii characters and a cookie has
# any part which is unicode (even ascii), the response
# should not fail.
cherrypy.response.cookie['candy'] = 'bar'
cherrypy.response.cookie['candy']['domain'] = 'cherrypy.org'
cherrypy.response.headers[
'Some-Header'] = 'My d\xc3\xb6g has fleas'
return 'Any content'
@cherrypy.expose
def reqparams(self, *args, **kwargs):
return ntob(', ').join(
[": ".join((k, v)).encode('utf8')
for k, v in sorted(cherrypy.request.params.items())]
)
@cherrypy.expose
@cherrypy.config(**{
'tools.encode.text_only': False,
'tools.encode.add_charset': True,
})
def nontext(self, *args, **kwargs):
cherrypy.response.headers[
'Content-Type'] = 'application/binary'
return '\x00\x01\x02\x03'
class GZIP:
@cherrypy.expose
def index(self):
yield "Hello, world"
@cherrypy.expose
# Turn encoding off so the gzip tool is the one doing the collapse.
@cherrypy.config(**{'tools.encode.on': False})
def noshow(self):
# Test for ticket #147, where yield showed no exceptions
# (content-encoding was still gzip even though traceback
# wasn't zipped).
raise IndexError()
yield "Here be dragons"
@cherrypy.expose
@cherrypy.config(**{'response.stream': True})
def noshow_stream(self):
# Test for ticket #147, where yield showed no exceptions
# (content-encoding was still gzip even though traceback
# wasn't zipped).
raise IndexError()
yield "Here be dragons"
class Decode:
@cherrypy.expose
@cherrypy.config(**{
'tools.decode.on': True,
'tools.decode.default_encoding': ['utf-16'],
})
def extra_charset(self, *args, **kwargs):
return ', '.join([": ".join((k, v))
for k, v in cherrypy.request.params.items()])
@cherrypy.expose
@cherrypy.config(**{
'tools.decode.on': True,
'tools.decode.encoding': 'utf-16',
})
def force_charset(self, *args, **kwargs):
return ', '.join([": ".join((k, v))
for k, v in cherrypy.request.params.items()])
root = Root()
root.gzip = GZIP()
root.decode = Decode()
cherrypy.tree.mount(root, config={'/gzip': {'tools.gzip.on': True}})
setup_server = staticmethod(setup_server)
def test_query_string_decoding(self):
if six.PY3:
# This test fails on Python 3. See #1443
return
europoundUtf8 = europoundUnicode.encode('utf-8')
self.getPage(ntob('/?param=') + europoundUtf8)
self.assertBody(europoundUtf8)
# Encoded utf8 query strings MUST be parsed correctly.
# Here, q is the POUND SIGN U+00A3 encoded in utf8 and then %HEX
self.getPage("/reqparams?q=%C2%A3")
# The return value will be encoded as utf8.
self.assertBody(ntob("q: \xc2\xa3"))
# Query strings that are incorrectly encoded MUST raise 404.
# Here, q is the POUND SIGN U+00A3 encoded in latin1 and then %HEX
self.getPage("/reqparams?q=%A3")
self.assertStatus(404)
self.assertErrorPage(
404,
"The given query string could not be processed. Query "
"strings for this resource must be encoded with 'utf8'.")
def test_urlencoded_decoding(self):
# Test the decoding of an application/x-www-form-urlencoded entity.
europoundUtf8 = europoundUnicode.encode('utf-8')
body = ntob("param=") + europoundUtf8
self.getPage('/',
method='POST',
headers=[
("Content-Type", "application/x-www-form-urlencoded"),
("Content-Length", str(len(body))),
],
body=body),
self.assertBody(europoundUtf8)
# Encoded utf8 entities MUST be parsed and decoded correctly.
# Here, q is the POUND SIGN U+00A3 encoded in utf8
body = ntob("q=\xc2\xa3")
self.getPage('/reqparams', method='POST',
headers=[(
"Content-Type", "application/x-www-form-urlencoded"),
("Content-Length", str(len(body))),
],
body=body),
self.assertBody(ntob("q: \xc2\xa3"))
# ...and in utf16, which is not in the default attempt_charsets list:
body = ntob("\xff\xfeq\x00=\xff\xfe\xa3\x00")
self.getPage('/reqparams',
method='POST',
headers=[
("Content-Type",
"application/x-www-form-urlencoded;charset=utf-16"),
("Content-Length", str(len(body))),
],
body=body),
self.assertBody(ntob("q: \xc2\xa3"))
# Entities that are incorrectly encoded MUST raise 400.
# Here, q is the POUND SIGN U+00A3 encoded in utf16, but
# the Content-Type incorrectly labels it utf-8.
body = ntob("\xff\xfeq\x00=\xff\xfe\xa3\x00")
self.getPage('/reqparams',
method='POST',
headers=[
("Content-Type",
"application/x-www-form-urlencoded;charset=utf-8"),
("Content-Length", str(len(body))),
],
body=body),
self.assertStatus(400)
self.assertErrorPage(
400,
"The request entity could not be decoded. The following charsets "
"were attempted: ['utf-8']")
def test_decode_tool(self):
# An extra charset should be tried first, and succeed if it matches.
# Here, we add utf-16 as a charset and pass a utf-16 body.
body = ntob("\xff\xfeq\x00=\xff\xfe\xa3\x00")
self.getPage('/decode/extra_charset', method='POST',
headers=[(
"Content-Type", "application/x-www-form-urlencoded"),
("Content-Length", str(len(body))),
],
body=body),
self.assertBody(ntob("q: \xc2\xa3"))
# An extra charset should be tried first, and continue to other default
# charsets if it doesn't match.
# Here, we add utf-16 as a charset but still pass a utf-8 body.
body = ntob("q=\xc2\xa3")
self.getPage('/decode/extra_charset', method='POST',
headers=[(
"Content-Type", "application/x-www-form-urlencoded"),
("Content-Length", str(len(body))),
],
body=body),
self.assertBody(ntob("q: \xc2\xa3"))
# An extra charset should error if force is True and it doesn't match.
# Here, we force utf-16 as a charset but still pass a utf-8 body.
body = ntob("q=\xc2\xa3")
self.getPage('/decode/force_charset', method='POST',
headers=[(
"Content-Type", "application/x-www-form-urlencoded"),
("Content-Length", str(len(body))),
],
body=body),
self.assertErrorPage(
400,
"The request entity could not be decoded. The following charsets "
"were attempted: ['utf-16']")
def test_multipart_decoding(self):
# Test the decoding of a multipart entity when the charset (utf16) is
# explicitly given.
body = ntob('\r\n'.join([
'--X',
'Content-Type: text/plain;charset=utf-16',
'Content-Disposition: form-data; name="text"',
'',
'\xff\xfea\x00b\x00\x1c c\x00',
'--X',
'Content-Type: text/plain;charset=utf-16',
'Content-Disposition: form-data; name="submit"',
'',
'\xff\xfeC\x00r\x00e\x00a\x00t\x00e\x00',
'--X--'
]))
self.getPage('/reqparams', method='POST',
headers=[(
"Content-Type", "multipart/form-data;boundary=X"),
("Content-Length", str(len(body))),
],
body=body),
self.assertBody(ntob("submit: Create, text: ab\xe2\x80\x9cc"))
@mock.patch('cherrypy._cpreqbody.Part.maxrambytes', 1)
def test_multipart_decoding_bigger_maxrambytes(self):
"""
Decoding of a multipart entity should also pass when
the entity is bigger than maxrambytes. See ticket #1352.
"""
self.test_multipart_decoding()
def test_multipart_decoding_no_charset(self):
# Test the decoding of a multipart entity when the charset (utf8) is
# NOT explicitly given, but is in the list of charsets to attempt.
body = ntob('\r\n'.join([
'--X',
'Content-Disposition: form-data; name="text"',
'',
'\xe2\x80\x9c',
'--X',
'Content-Disposition: form-data; name="submit"',
'',
'Create',
'--X--'
]))
self.getPage('/reqparams', method='POST',
headers=[(
"Content-Type", "multipart/form-data;boundary=X"),
("Content-Length", str(len(body))),
],
body=body),
self.assertBody(ntob("submit: Create, text: \xe2\x80\x9c"))
def test_multipart_decoding_no_successful_charset(self):
# Test the decoding of a multipart entity when the charset (utf16) is
# NOT explicitly given, and is NOT in the list of charsets to attempt.
body = ntob('\r\n'.join([
'--X',
'Content-Disposition: form-data; name="text"',
'',
'\xff\xfea\x00b\x00\x1c c\x00',
'--X',
'Content-Disposition: form-data; name="submit"',
'',
'\xff\xfeC\x00r\x00e\x00a\x00t\x00e\x00',
'--X--'
]))
self.getPage('/reqparams', method='POST',
headers=[(
"Content-Type", "multipart/form-data;boundary=X"),
("Content-Length", str(len(body))),
],
body=body),
self.assertStatus(400)
self.assertErrorPage(
400,
"The request entity could not be decoded. The following charsets "
"were attempted: ['us-ascii', 'utf-8']")
def test_nontext(self):
self.getPage('/nontext')
self.assertHeader('Content-Type', 'application/binary;charset=utf-8')
self.assertBody('\x00\x01\x02\x03')
def testEncoding(self):
# Default encoding should be utf-8
self.getPage('/mao_zedong')
self.assertBody(sing8)
# Ask for utf-16.
self.getPage('/mao_zedong', [('Accept-Charset', 'utf-16')])
self.assertHeader('Content-Type', 'text/html;charset=utf-16')
self.assertBody(sing16)
# Ask for multiple encodings. ISO-8859-1 should fail, and utf-16
# should be produced.
self.getPage('/mao_zedong', [('Accept-Charset',
'iso-8859-1;q=1, utf-16;q=0.5')])
self.assertBody(sing16)
# The "*" value should default to our default_encoding, utf-8
self.getPage('/mao_zedong', [('Accept-Charset', '*;q=1, utf-7;q=.2')])
self.assertBody(sing8)
# Only allow iso-8859-1, which should fail and raise 406.
self.getPage('/mao_zedong', [('Accept-Charset', 'iso-8859-1, *;q=0')])
self.assertStatus("406 Not Acceptable")
self.assertInBody("Your client sent this Accept-Charset header: "
"iso-8859-1, *;q=0. We tried these charsets: "
"iso-8859-1.")
# Ask for x-mac-ce, which should be unknown. See ticket #569.
self.getPage('/mao_zedong', [('Accept-Charset',
'us-ascii, ISO-8859-1, x-mac-ce')])
self.assertStatus("406 Not Acceptable")
self.assertInBody("Your client sent this Accept-Charset header: "
"us-ascii, ISO-8859-1, x-mac-ce. We tried these "
"charsets: ISO-8859-1, us-ascii, x-mac-ce.")
# Test the 'encoding' arg to encode.
self.getPage('/utf8')
self.assertBody(sing8)
self.getPage('/utf8', [('Accept-Charset', 'us-ascii, ISO-8859-1')])
self.assertStatus("406 Not Acceptable")
def testGzip(self):
zbuf = io.BytesIO()
zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)
zfile.write(ntob("Hello, world"))
zfile.close()
self.getPage('/gzip/', headers=[("Accept-Encoding", "gzip")])
self.assertInBody(zbuf.getvalue()[:3])
self.assertHeader("Vary", "Accept-Encoding")
self.assertHeader("Content-Encoding", "gzip")
# Test when gzip is denied.
self.getPage('/gzip/', headers=[("Accept-Encoding", "identity")])
self.assertHeader("Vary", "Accept-Encoding")
self.assertNoHeader("Content-Encoding")
self.assertBody("Hello, world")
self.getPage('/gzip/', headers=[("Accept-Encoding", "gzip;q=0")])
self.assertHeader("Vary", "Accept-Encoding")
self.assertNoHeader("Content-Encoding")
self.assertBody("Hello, world")
self.getPage('/gzip/', headers=[("Accept-Encoding", "*;q=0")])
self.assertStatus(406)
self.assertNoHeader("Content-Encoding")
self.assertErrorPage(406, "identity, gzip")
# Test for ticket #147
self.getPage('/gzip/noshow', headers=[("Accept-Encoding", "gzip")])
self.assertNoHeader('Content-Encoding')
self.assertStatus(500)
self.assertErrorPage(500, pattern="IndexError\n")
# In this case, there's nothing we can do to deliver a
# readable page, since 1) the gzip header is already set,
# and 2) we may have already written some of the body.
# The fix is to never stream yields when using gzip.
if (cherrypy.server.protocol_version == "HTTP/1.0" or
getattr(cherrypy.server, "using_apache", False)):
self.getPage('/gzip/noshow_stream',
headers=[("Accept-Encoding", "gzip")])
self.assertHeader('Content-Encoding', 'gzip')
self.assertInBody('\x1f\x8b\x08\x00')
else:
# The wsgiserver will simply stop sending data, and the HTTP client
# will error due to an incomplete chunk-encoded stream.
self.assertRaises((ValueError, IncompleteRead), self.getPage,
'/gzip/noshow_stream',
headers=[("Accept-Encoding", "gzip")])
def test_UnicodeHeaders(self):
self.getPage('/cookies_and_headers')
self.assertBody('Any content')
|
[
"io.BytesIO",
"cherrypy.request.params.items",
"cherrypy._cpcompat.ntob",
"mock.patch",
"cherrypy.tree.mount",
"gzip.GzipFile",
"cherrypy.config",
"cherrypy._cpcompat.ntou"
] |
[((159, 186), 'cherrypy._cpcompat.ntou', 'ntou', (['"""£"""'], {'encoding': '"""utf-8"""'}), "('£', encoding='utf-8')\n", (163, 186), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((194, 245), 'cherrypy._cpcompat.ntou', 'ntou', (['"""毛泽东: Sing, Little Birdie?"""'], {'encoding': '"""utf-8"""'}), "('毛泽东: Sing, Little Birdie?', encoding='utf-8')\n", (198, 245), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((9997, 10050), 'mock.patch', 'mock.patch', (['"""cherrypy._cpreqbody.Part.maxrambytes"""', '(1)'], {}), "('cherrypy._cpreqbody.Part.maxrambytes', 1)\n", (10007, 10050), False, 'import mock\n'), ((3844, 3912), 'cherrypy.tree.mount', 'cherrypy.tree.mount', (['root'], {'config': "{'/gzip': {'tools.gzip.on': True}}"}), "(root, config={'/gzip': {'tools.gzip.on': True}})\n", (3863, 3912), False, 'import cherrypy\n'), ((5620, 5632), 'cherrypy._cpcompat.ntob', 'ntob', (['"""q=£"""'], {}), "('q=£')\n", (5624, 5632), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((6057, 6080), 'cherrypy._cpcompat.ntob', 'ntob', (["'ÿþq\\x00=ÿþ£\\x00'"], {}), "('ÿþq\\x00=ÿþ£\\x00')\n", (6061, 6080), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((6683, 6706), 'cherrypy._cpcompat.ntob', 'ntob', (["'ÿþq\\x00=ÿþ£\\x00'"], {}), "('ÿþq\\x00=ÿþ£\\x00')\n", (6687, 6706), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((7452, 7475), 'cherrypy._cpcompat.ntob', 'ntob', (["'ÿþq\\x00=ÿþ£\\x00'"], {}), "('ÿþq\\x00=ÿþ£\\x00')\n", (7456, 7475), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((8034, 8046), 'cherrypy._cpcompat.ntob', 'ntob', (['"""q=£"""'], {}), "('q=£')\n", (8038, 8046), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((8557, 8569), 'cherrypy._cpcompat.ntob', 'ntob', (['"""q=£"""'], {}), "('q=£')\n", (8561, 8569), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((14298, 14310), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (14308, 14310), False, 'import io\n'), ((14327, 14382), 'gzip.GzipFile', 'gzip.GzipFile', ([], {'mode': '"""wb"""', 'fileobj': 'zbuf', 'compresslevel': '(9)'}), "(mode='wb', fileobj=zbuf, compresslevel=9)\n", (14340, 14382), False, 'import gzip\n'), ((778, 831), 'cherrypy.config', 'cherrypy.config', ([], {}), "(**{'tools.encode.encoding': 'utf-8'})\n", (793, 831), False, 'import cherrypy\n'), ((1733, 1823), 'cherrypy.config', 'cherrypy.config', ([], {}), "(**{'tools.encode.text_only': False,\n 'tools.encode.add_charset': True})\n", (1748, 1823), False, 'import cherrypy\n'), ((2299, 2344), 'cherrypy.config', 'cherrypy.config', ([], {}), "(**{'tools.encode.on': False})\n", (2314, 2344), False, 'import cherrypy\n'), ((2673, 2717), 'cherrypy.config', 'cherrypy.config', ([], {}), "(**{'response.stream': True})\n", (2688, 2717), False, 'import cherrypy\n'), ((3076, 3169), 'cherrypy.config', 'cherrypy.config', ([], {}), "(**{'tools.decode.on': True, 'tools.decode.default_encoding':\n ['utf-16']})\n", (3091, 3169), False, 'import cherrypy\n'), ((3442, 3521), 'cherrypy.config', 'cherrypy.config', ([], {}), "(**{'tools.decode.on': True, 'tools.decode.encoding': 'utf-16'})\n", (3457, 3521), False, 'import cherrypy\n'), ((4502, 4515), 'cherrypy._cpcompat.ntob', 'ntob', (['"""q: £"""'], {}), "('q: £')\n", (4506, 4515), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((5114, 5128), 'cherrypy._cpcompat.ntob', 'ntob', (['"""param="""'], {}), "('param=')\n", (5118, 5128), False, 'from 
cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((5942, 5955), 'cherrypy._cpcompat.ntob', 'ntob', (['"""q: £"""'], {}), "('q: £')\n", (5946, 5955), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((6461, 6474), 'cherrypy._cpcompat.ntob', 'ntob', (['"""q: £"""'], {}), "('q: £')\n", (6465, 6474), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((7805, 7818), 'cherrypy._cpcompat.ntob', 'ntob', (['"""q: £"""'], {}), "('q: £')\n", (7809, 7818), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((8367, 8380), 'cherrypy._cpcompat.ntob', 'ntob', (['"""q: £"""'], {}), "('q: £')\n", (8371, 8380), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((9944, 9986), 'cherrypy._cpcompat.ntob', 'ntob', (['"""submit: Create, text: abâ\x80\x9cc"""'], {}), "('submit: Create, text: abâ\\x80\\x9cc')\n", (9948, 9986), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((11107, 11146), 'cherrypy._cpcompat.ntob', 'ntob', (['"""submit: Create, text: â\x80\x9c"""'], {}), "('submit: Create, text: â\\x80\\x9c')\n", (11111, 11146), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((14403, 14423), 'cherrypy._cpcompat.ntob', 'ntob', (['"""Hello, world"""'], {}), "('Hello, world')\n", (14407, 14423), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((4172, 4188), 'cherrypy._cpcompat.ntob', 'ntob', (['"""/?param="""'], {}), "('/?param=')\n", (4176, 4188), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((1527, 1537), 'cherrypy._cpcompat.ntob', 'ntob', (['""", """'], {}), "(', ')\n", (1531, 1537), False, 'from cherrypy._cpcompat import IncompleteRead, ntob, ntou\n'), ((3365, 3396), 'cherrypy.request.params.items', 'cherrypy.request.params.items', ([], {}), '()\n', (3394, 3396), False, 'import cherrypy\n'), ((3721, 3752), 'cherrypy.request.params.items', 'cherrypy.request.params.items', ([], {}), '()\n', (3750, 3752), False, 'import cherrypy\n'), ((1638, 1669), 'cherrypy.request.params.items', 'cherrypy.request.params.items', ([], {}), '()\n', (1667, 1669), False, 'import cherrypy\n')]
|
import json
from apistar import test
from app import app
from restpass import redisclient
client = test.TestClient(app)
test_id = {'login': '<EMAIL>', 'password': '<PASSWORD>'}
password_slug = '?p=password<PASSWORD>'
def test_get_ids_list():
redisclient.set_id('test_id', json.dumps(test_id))
response = client.get('/ids/'+password_slug)
assert response.status_code == 200
assert 'test_id' in response.json()['identities']
redisclient.delete_id('test_id')
def test_get_existing_id():
redisclient.set_id('test_id', json.dumps(test_id))
response = client.get('/ids/test_id' + password_slug)
assert response.status_code == 200
assert response.json() == json.loads(json.dumps(test_id))
redisclient.delete_id('test_id')
def test_get_absent_id():
redisclient.delete_id('test_id')
response = client.get('/ids/test_id' + password_slug)
assert response.status_code == 404
def test_create_existing_id():
redisclient.set_id('test_id', json.dumps(test_id))
response = client.post('/ids/test_id' + password_slug +
'&login={}'.format(test_id['login']) +
'&password={}'.format(test_id['password']))
assert response.status_code == 500
redisclient.delete_id('test_id')
def test_create_absent_id():
response = client.post('/ids/test_id' + password_slug +
'&login={}'.format(test_id['login']) +
'&password={}'.format(test_id['password']))
assert response.status_code == 200
assert(redisclient.get_id('test_id'))
assert response.json() == {'created': 'test_id'}
redisclient.delete_id('test_id')
def test_update_existing_id():
redisclient.set_id('test_id', json.dumps(test_id))
response = client.put('/ids/test_id' + password_slug +
'&login={}'.format('updated'))
assert response.status_code == 200
assert response.json() == {'login': 'updated', 'password': '<PASSWORD>'}
response = client.put('/ids/test_id' + password_slug +
'&password={}'.format('<PASSWORD>'))
assert response.status_code == 200
assert response.json() == {'login': 'updated', 'password': '<PASSWORD>'}
response = client.put('/ids/test_id' + password_slug +
'&login={}'.format('again') +
'&password={}'.format('<PASSWORD>'))
assert response.status_code == 200
assert response.json() == {'login': 'again', 'password': '<PASSWORD>'}
response = client.put('/ids/test_id' + password_slug)
assert response.status_code == 400
redisclient.delete_id('test_id')
def test_update_absent_id():
response = client.put('/ids/test_id' + password_slug +
'&password={}'.format('<PASSWORD>'))
assert response.status_code == 404
redisclient.delete_id('test_id')
def test_delete_existing_id():
redisclient.set_id('test_id', json.dumps(test_id))
response = client.delete('/ids/test_id' + password_slug)
assert response.status_code == 200
assert response.json() == {'deleted': 'test_id'}
def test_delete_absent_id():
response = client.delete('/ids/test_id' + password_slug)
assert response.status_code == 404
|
[
"apistar.test.TestClient",
"restpass.redisclient.get_id",
"restpass.redisclient.delete_id",
"json.dumps"
] |
[((100, 120), 'apistar.test.TestClient', 'test.TestClient', (['app'], {}), '(app)\n', (115, 120), False, 'from apistar import test\n'), ((446, 478), 'restpass.redisclient.delete_id', 'redisclient.delete_id', (['"""test_id"""'], {}), "('test_id')\n", (467, 478), False, 'from restpass import redisclient\n'), ((727, 759), 'restpass.redisclient.delete_id', 'redisclient.delete_id', (['"""test_id"""'], {}), "('test_id')\n", (748, 759), False, 'from restpass import redisclient\n'), ((792, 824), 'restpass.redisclient.delete_id', 'redisclient.delete_id', (['"""test_id"""'], {}), "('test_id')\n", (813, 824), False, 'from restpass import redisclient\n'), ((1250, 1282), 'restpass.redisclient.delete_id', 'redisclient.delete_id', (['"""test_id"""'], {}), "('test_id')\n", (1271, 1282), False, 'from restpass import redisclient\n'), ((1561, 1590), 'restpass.redisclient.get_id', 'redisclient.get_id', (['"""test_id"""'], {}), "('test_id')\n", (1579, 1590), False, 'from restpass import redisclient\n'), ((1649, 1681), 'restpass.redisclient.delete_id', 'redisclient.delete_id', (['"""test_id"""'], {}), "('test_id')\n", (1670, 1681), False, 'from restpass import redisclient\n'), ((2633, 2665), 'restpass.redisclient.delete_id', 'redisclient.delete_id', (['"""test_id"""'], {}), "('test_id')\n", (2654, 2665), False, 'from restpass import redisclient\n'), ((2862, 2894), 'restpass.redisclient.delete_id', 'redisclient.delete_id', (['"""test_id"""'], {}), "('test_id')\n", (2883, 2894), False, 'from restpass import redisclient\n'), ((279, 298), 'json.dumps', 'json.dumps', (['test_id'], {}), '(test_id)\n', (289, 298), False, 'import json\n'), ((543, 562), 'json.dumps', 'json.dumps', (['test_id'], {}), '(test_id)\n', (553, 562), False, 'import json\n'), ((989, 1008), 'json.dumps', 'json.dumps', (['test_id'], {}), '(test_id)\n', (999, 1008), False, 'import json\n'), ((1749, 1768), 'json.dumps', 'json.dumps', (['test_id'], {}), '(test_id)\n', (1759, 1768), False, 'import json\n'), ((2962, 2981), 'json.dumps', 'json.dumps', (['test_id'], {}), '(test_id)\n', (2972, 2981), False, 'import json\n'), ((702, 721), 'json.dumps', 'json.dumps', (['test_id'], {}), '(test_id)\n', (712, 721), False, 'import json\n')]
|
from __future__ import absolute_import
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from sklearn.feature_extraction.text import VectorizerMixin # type: ignore
from sklearn.pipeline import FeatureUnion # type: ignore
from eli5.base import (
DocWeightedSpans, WeightedSpans, FeatureWeights, FeatureWeight,
TargetExplanation)
from eli5.formatters import FormattedFeatureName
from eli5.sklearn.unhashing import InvertableHashingVectorizer
from eli5.sklearn._span_analyzers import build_span_analyzer
def get_weighted_spans(doc, vec, feature_weights):
# type: (Any, Any, FeatureWeights) -> Optional[WeightedSpans]
""" If possible, return a dict with preprocessed document and a list
of spans with weights, corresponding to features in the document.
"""
if isinstance(vec, FeatureUnion):
return _get_weighted_spans_from_union(doc, vec, feature_weights)
else:
result = _get_doc_weighted_spans(doc, vec, feature_weights)
if result is not None:
found_features, doc_weighted_spans = result
return WeightedSpans(
[doc_weighted_spans],
other=_get_other(feature_weights, [('', found_features)]),
)
def add_weighted_spans(doc, vec, vectorized, target_expl):
# type: (Any, Any, bool, TargetExplanation) -> None
"""
Compute and set ``target_expl.weighted_spans`` attribute, when possible.
"""
if vec is None or vectorized:
return
weighted_spans = get_weighted_spans(doc, vec, target_expl.feature_weights)
if weighted_spans:
target_expl.weighted_spans = weighted_spans
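# Illustrative usage sketch (an assumption, not part of the original module): with a fitted
# scikit-learn text vectorizer `vec` and a FeatureWeights instance `fw` taken from an eli5
# explanation, highlighted spans for a raw document could be computed as
#   weighted_spans = get_weighted_spans("some raw document text", vec, fw)
# or attached to a TargetExplanation in place via
#   add_weighted_spans("some raw document text", vec, False, target_expl)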
FoundFeatures = Dict[Tuple[str, int], float]
def _get_doc_weighted_spans(doc,
vec,
feature_weights, # type: FeatureWeights
feature_fn=None # type: Callable[[str], str]
):
# type: (...) -> Optional[Tuple[FoundFeatures, DocWeightedSpans]]
if isinstance(vec, InvertableHashingVectorizer):
vec = vec.vec
if hasattr(vec, 'get_doc_weighted_spans'):
return vec.get_doc_weighted_spans(doc, feature_weights, feature_fn)
if not isinstance(vec, VectorizerMixin):
return None
span_analyzer, preprocessed_doc = build_span_analyzer(doc, vec)
if span_analyzer is None:
return None
feature_weights_dict = _get_feature_weights_dict(feature_weights,
feature_fn)
spans = []
found_features = {}
for f_spans, feature in span_analyzer(preprocessed_doc):
if feature not in feature_weights_dict:
continue
weight, key = feature_weights_dict[feature]
spans.append((feature, f_spans, weight))
# XXX: this assumes feature names are unique
found_features[key] = weight
return found_features, DocWeightedSpans(
document=preprocessed_doc,
spans=spans,
preserve_density=vec.analyzer.startswith('char'),
)
def _get_feature_weights_dict(feature_weights, # type: FeatureWeights
feature_fn # type: Callable[[str], str]
):
# type: (...) -> Dict[str, Tuple[float, Tuple[str, int]]]
""" Return {feat_name: (weight, (group, idx))} mapping. """
return {
# (group, idx) is an unique feature identifier, e.g. ('pos', 2)
feat_name: (fw.weight, (group, idx))
for group in ['pos', 'neg']
for idx, fw in enumerate(getattr(feature_weights, group))
for feat_name in _get_features(fw.feature, feature_fn)
}
def _get_features(feature, feature_fn=None):
if isinstance(feature, list):
features = [f['name'] for f in feature]
else:
features = [feature]
if feature_fn:
features = list(filter(None, map(feature_fn, features)))
return features
def _get_weighted_spans_from_union(doc, vec_union, feature_weights):
# type: (Any, FeatureUnion, FeatureWeights) -> Optional[WeightedSpans]
docs_weighted_spans = []
named_found_features = []
for vec_name, vec in vec_union.transformer_list:
vec_prefix = '{}__'.format(vec_name)
def feature_fn(name):
if isinstance(name, FormattedFeatureName):
return
if not name.startswith(vec_prefix):
return # drop feature
return name[len(vec_prefix):] # remove prefix
result = _get_doc_weighted_spans(doc, vec, feature_weights, feature_fn)
if result:
found_features, doc_weighted_spans = result
doc_weighted_spans.vec_name = vec_name
named_found_features.append((vec_name, found_features))
docs_weighted_spans.append(doc_weighted_spans)
if docs_weighted_spans:
return WeightedSpans(
docs_weighted_spans,
other=_get_other(feature_weights, named_found_features),
)
def _get_other(feature_weights, named_found_features):
# type: (FeatureWeights, List[Tuple[str, FoundFeatures]]) -> FeatureWeights
# search for items that were not accounted at all.
other_items = []
accounted_keys = set() # type: Set[Tuple[str, int]]
all_found_features = set() # type: Set[Tuple[str, int]]
for _, found_features in named_found_features:
all_found_features.update(found_features)
for group in ['pos', 'neg']:
for idx, fw in enumerate(getattr(feature_weights, group)):
key = (group, idx)
if key not in all_found_features and key not in accounted_keys:
other_items.append(fw)
accounted_keys.add(key)
for vec_name, found_features in named_found_features:
if found_features:
other_items.append(FeatureWeight(
feature=FormattedFeatureName(
'{}Highlighted in text (sum)'.format(
'{}: '.format(vec_name) if vec_name else '')),
weight=sum(found_features.values())))
other_items.sort(key=lambda x: abs(x.weight), reverse=True)
return FeatureWeights(
pos=[fw for fw in other_items if fw.weight >= 0],
neg=[fw for fw in other_items if fw.weight < 0],
pos_remaining=feature_weights.pos_remaining,
neg_remaining=feature_weights.neg_remaining,
)
|
[
"eli5.base.FeatureWeights",
"eli5.sklearn._span_analyzers.build_span_analyzer"
] |
[((2315, 2344), 'eli5.sklearn._span_analyzers.build_span_analyzer', 'build_span_analyzer', (['doc', 'vec'], {}), '(doc, vec)\n', (2334, 2344), False, 'from eli5.sklearn._span_analyzers import build_span_analyzer\n'), ((6158, 6370), 'eli5.base.FeatureWeights', 'FeatureWeights', ([], {'pos': '[fw for fw in other_items if fw.weight >= 0]', 'neg': '[fw for fw in other_items if fw.weight < 0]', 'pos_remaining': 'feature_weights.pos_remaining', 'neg_remaining': 'feature_weights.neg_remaining'}), '(pos=[fw for fw in other_items if fw.weight >= 0], neg=[fw for\n fw in other_items if fw.weight < 0], pos_remaining=feature_weights.\n pos_remaining, neg_remaining=feature_weights.neg_remaining)\n', (6172, 6370), False, 'from eli5.base import DocWeightedSpans, WeightedSpans, FeatureWeights, FeatureWeight, TargetExplanation\n')]
|
import pandas as pd
import json
POSITIVE_DETAIL_DATA_PATH = 'data/2019-ncov-japan/Data/positiveDetail.csv'
OUTPUT_JSON_PATH = 'data/created_json/positive_detail.json'
def create_json_file():
header = ('id', 'announcement_date', 'diagnosis_date', 'prefecture', 'residence_prefecture',
'age', 'gender', 'attribute', 'prefecture_number',
'travel_or_contact', 'detail', 'cluster', 'src', 'onset', 'symptom',
'death_or_discharge_date', 'comment1', 'outcome', 'outcome_src', 'comment2')
positive_detail_df = pd.read_csv(POSITIVE_DETAIL_DATA_PATH, names=header, encoding='utf-8')
with open(OUTPUT_JSON_PATH, 'w', encoding='utf-8') as f:
json.dump(positive_detail_df.drop(positive_detail_df.index[0]).fillna('').to_dict(orient='records'),
f,
indent=2,
ensure_ascii=False)
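# Illustrative note (an assumption about the output shape): each element of the resulting
# JSON array is a record keyed by the `header` names above, for example
# {"id": ..., "announcement_date": ..., "prefecture": ..., ...}.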
if __name__ == '__main__':
create_json_file()
|
[
"pandas.read_csv"
] |
[((555, 625), 'pandas.read_csv', 'pd.read_csv', (['POSITIVE_DETAIL_DATA_PATH'], {'names': 'header', 'encoding': '"""utf-8"""'}), "(POSITIVE_DETAIL_DATA_PATH, names=header, encoding='utf-8')\n", (566, 625), True, 'import pandas as pd\n')]
|
from __future__ import absolute_import
import argparse
import os
import logging
import mimetypes
from six import text_type
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions, SetupOptions
from apache_beam.metrics.metric import Metrics
from sciencebeam_gym.utils.collection import (
extend_dict
)
from sciencebeam_gym.beam_utils.utils import (
TransformAndCount,
TransformAndLog,
MapOrLog,
PreventFusion
)
from sciencebeam_gym.beam_utils.files import (
ReadFileList,
FindFiles
)
from sciencebeam_gym.beam_utils.io import (
read_all_from_path,
save_file_content
)
from sciencebeam_gym.beam_utils.main import (
add_cloud_args,
process_cloud_args,
process_sciencebeam_gym_dep_args
)
from sciencebeam_gym.preprocess.preprocessing_utils import (
join_if_relative_path,
get_output_file
)
from sciencebeam.config.app_config import get_app_config
from sciencebeam.pipelines import (
get_pipeline_for_configuration_and_args,
add_pipeline_args
)
LOGGER = logging.getLogger(__name__)
def get_logger():
return logging.getLogger(__name__)
class MetricCounters(object):
FILES = 'files'
class DataProps(object):
SOURCE_FILENAME = 'source_filename'
FILENAME = 'filename'
CONTENT = 'content'
TYPE = 'type'
def FileUrlSource(opt):
if opt.source_file_list:
return ReadFileList(
join_if_relative_path(opt.base_data_path, opt.source_file_list),
column=opt.source_file_column, limit=opt.limit
)
else:
return FindFiles(join_if_relative_path(opt.base_data_path, opt.source_path))
def ReadFileContent():
return "ReadFileContent" >> TransformAndCount(
beam.Map(lambda file_url: {
DataProps.SOURCE_FILENAME: file_url,
DataProps.FILENAME: file_url,
DataProps.CONTENT: read_all_from_path(file_url)
}),
MetricCounters.FILES
)
def get_step_error_counter(step):
return 'error_%s' % step
def get_step_ignored_counter(step):
return 'ignored_%s' % step
def get_step_processed_counter(step):
return 'processed_%s' % step
def execute_or_skip_step(step):
supported_types = step.get_supported_types()
processed_counter = Metrics.counter('PipelineStep', get_step_processed_counter(step))
ignored_counter = Metrics.counter('PipelineStep', get_step_ignored_counter(step))
def wrapper(x):
data_type = x['type']
if data_type in supported_types:
      get_logger().debug('executing step %s: %s (%s)', step, x.keys(), data_type)
result = extend_dict(x, step(x))
get_logger().debug('result of step %s: %s (%s)', step, result.keys(), result.get('type'))
processed_counter.inc()
return result
else:
get_logger().debug(
'skipping step %s, %s not in supported types (%s)', step, data_type, supported_types
)
ignored_counter.inc()
return x
return wrapper
def get_step_transform(step):
step_name = str(step)
return step_name >> MapOrLog(
execute_or_skip_step(step),
log_fn=lambda e, v: (
get_logger().warning(
'caught exception (ignoring item): %s, source file: %s, step: %s',
e, v[DataProps.SOURCE_FILENAME], step_name, exc_info=e
)
), error_count=get_step_error_counter(step)
)
def encode_if_text_type(data):
return data.encode('utf-8') if isinstance(data, text_type) else data
def configure_pipeline(p, opt, pipeline, config):
get_pipeline_output_file = lambda source_url, ext: get_output_file(
source_url,
opt.base_data_path,
opt.output_path,
ext
)
steps = pipeline.get_steps(config, opt)
LOGGER.info('steps: %s', steps)
input_data = (
p |
FileUrlSource(opt) |
PreventFusion() |
ReadFileContent() |
"Determine Type" >> beam.Map(lambda d: extend_dict(d, {
DataProps.TYPE: mimetypes.guess_type(d[DataProps.SOURCE_FILENAME])[0]
}))
)
result = input_data
for step in steps:
LOGGER.debug('step: %s', step)
result |= get_step_transform(step)
_ = (
result |
beam.Map(lambda x: LOGGER.info('result: %s (%s)', x.keys(), x[DataProps.TYPE]))
)
_ = (
result |
"WriteOutput" >> TransformAndLog(
beam.Map(lambda v: save_file_content(
get_pipeline_output_file(
v[DataProps.SOURCE_FILENAME],
opt.output_suffix
),
encode_if_text_type(v[DataProps.CONTENT])
)),
log_fn=lambda x: get_logger().info('saved output to: %s', x)
)
)
def add_main_args(parser):
parser.add_argument(
'--data-path', type=str, required=True,
help='base data path'
)
source_group = parser.add_argument_group('source')
source_one_of_group = source_group.add_mutually_exclusive_group(required=True)
source_one_of_group.add_argument(
'--source-path', type=str, required=False,
help='path to source file(s), relative to data-path'
)
source_one_of_group.add_argument(
'--source-file-list', type=str, required=False,
help='path to source csv/tsv file list'
)
source_group.add_argument(
'--source-file-column', type=str, required=False, default='url',
help='the column of the source file list to use'
)
parser.add_argument(
'--limit', type=int, required=False,
help='limit the number of file pairs to process'
)
output_group = parser.add_argument_group('output')
output_group.add_argument(
'--output-path', required=False,
help='Output directory to write results to.'
)
output_group.add_argument(
'--output-suffix', required=False, default='.xml',
help='Output file suffix to add to the filename (excluding the file extension).'
)
parser.add_argument(
'--debug', action='store_true', default=False,
help='enable debug output'
)
def process_main_args(args):
args.base_data_path = args.data_path.replace('/*/', '/')
if not args.output_path:
args.output_path = os.path.join(
os.path.dirname(args.base_data_path),
os.path.basename(args.base_data_path + '-results')
)
def parse_args(pipeline, config, argv=None):
parser = argparse.ArgumentParser()
add_pipeline_args(parser)
add_main_args(parser)
add_cloud_args(parser)
pipeline.add_arguments(parser, config, argv)
args = parser.parse_args(argv)
if args.debug:
logging.getLogger().setLevel('DEBUG')
process_main_args(args)
process_cloud_args(
args, args.output_path,
name='sciencebeam-convert'
)
process_sciencebeam_gym_dep_args(args)
get_logger().info('args: %s', args)
return args
def run(argv=None):
config = get_app_config()
pipeline = get_pipeline_for_configuration_and_args(config, argv=argv)
args = parse_args(pipeline, config, argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions.from_dictionary(vars(args))
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(args.runner, options=pipeline_options) as p:
configure_pipeline(p, args, pipeline, config)
# Execute the pipeline and wait until it is completed.
if __name__ == '__main__':
logging.basicConfig(level='INFO')
run()
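# Illustrative invocation sketch (the module path is hypothetical; the flags come from
# add_main_args above):
#   python -m sciencebeam.examples.pipeline_runner \
#       --data-path gs://my-bucket/data \
#       --source-file-list file-list.tsv \
#       --output-path gs://my-bucket/results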
|
[
"sciencebeam_gym.preprocess.preprocessing_utils.get_output_file",
"sciencebeam_gym.beam_utils.main.add_cloud_args",
"argparse.ArgumentParser",
"logging.basicConfig",
"os.path.basename",
"sciencebeam_gym.beam_utils.utils.PreventFusion",
"sciencebeam_gym.beam_utils.main.process_sciencebeam_gym_dep_args",
"os.path.dirname",
"sciencebeam.pipelines.get_pipeline_for_configuration_and_args",
"sciencebeam.config.app_config.get_app_config",
"apache_beam.Pipeline",
"sciencebeam_gym.preprocess.preprocessing_utils.join_if_relative_path",
"mimetypes.guess_type",
"sciencebeam_gym.beam_utils.main.process_cloud_args",
"sciencebeam.pipelines.add_pipeline_args",
"logging.getLogger",
"sciencebeam_gym.beam_utils.io.read_all_from_path"
] |
[((1029, 1056), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1046, 1056), False, 'import logging\n'), ((1085, 1112), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1102, 1112), False, 'import logging\n'), ((6019, 6044), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6042, 6044), False, 'import argparse\n'), ((6047, 6072), 'sciencebeam.pipelines.add_pipeline_args', 'add_pipeline_args', (['parser'], {}), '(parser)\n', (6064, 6072), False, 'from sciencebeam.pipelines import get_pipeline_for_configuration_and_args, add_pipeline_args\n'), ((6099, 6121), 'sciencebeam_gym.beam_utils.main.add_cloud_args', 'add_cloud_args', (['parser'], {}), '(parser)\n', (6113, 6121), False, 'from sciencebeam_gym.beam_utils.main import add_cloud_args, process_cloud_args, process_sciencebeam_gym_dep_args\n'), ((6292, 6362), 'sciencebeam_gym.beam_utils.main.process_cloud_args', 'process_cloud_args', (['args', 'args.output_path'], {'name': '"""sciencebeam-convert"""'}), "(args, args.output_path, name='sciencebeam-convert')\n", (6310, 6362), False, 'from sciencebeam_gym.beam_utils.main import add_cloud_args, process_cloud_args, process_sciencebeam_gym_dep_args\n'), ((6377, 6415), 'sciencebeam_gym.beam_utils.main.process_sciencebeam_gym_dep_args', 'process_sciencebeam_gym_dep_args', (['args'], {}), '(args)\n', (6409, 6415), False, 'from sciencebeam_gym.beam_utils.main import add_cloud_args, process_cloud_args, process_sciencebeam_gym_dep_args\n'), ((6502, 6518), 'sciencebeam.config.app_config.get_app_config', 'get_app_config', ([], {}), '()\n', (6516, 6518), False, 'from sciencebeam.config.app_config import get_app_config\n'), ((6533, 6591), 'sciencebeam.pipelines.get_pipeline_for_configuration_and_args', 'get_pipeline_for_configuration_and_args', (['config'], {'argv': 'argv'}), '(config, argv=argv)\n', (6572, 6591), False, 'from sciencebeam.pipelines import get_pipeline_for_configuration_and_args, add_pipeline_args\n'), ((7131, 7164), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '"""INFO"""'}), "(level='INFO')\n", (7150, 7164), False, 'import logging\n'), ((3432, 3501), 'sciencebeam_gym.preprocess.preprocessing_utils.get_output_file', 'get_output_file', (['source_url', 'opt.base_data_path', 'opt.output_path', 'ext'], {}), '(source_url, opt.base_data_path, opt.output_path, ext)\n', (3447, 3501), False, 'from sciencebeam_gym.preprocess.preprocessing_utils import join_if_relative_path, get_output_file\n'), ((6931, 6983), 'apache_beam.Pipeline', 'beam.Pipeline', (['args.runner'], {'options': 'pipeline_options'}), '(args.runner, options=pipeline_options)\n', (6944, 6983), True, 'import apache_beam as beam\n'), ((1371, 1434), 'sciencebeam_gym.preprocess.preprocessing_utils.join_if_relative_path', 'join_if_relative_path', (['opt.base_data_path', 'opt.source_file_list'], {}), '(opt.base_data_path, opt.source_file_list)\n', (1392, 1434), False, 'from sciencebeam_gym.preprocess.preprocessing_utils import join_if_relative_path, get_output_file\n'), ((1524, 1582), 'sciencebeam_gym.preprocess.preprocessing_utils.join_if_relative_path', 'join_if_relative_path', (['opt.base_data_path', 'opt.source_path'], {}), '(opt.base_data_path, opt.source_path)\n', (1545, 1582), False, 'from sciencebeam_gym.preprocess.preprocessing_utils import join_if_relative_path, get_output_file\n'), ((5861, 5897), 'os.path.dirname', 'os.path.dirname', (['args.base_data_path'], {}), '(args.base_data_path)\n', (5876, 5897), False, 'import os\n'), 
((5905, 5955), 'os.path.basename', 'os.path.basename', (["(args.base_data_path + '-results')"], {}), "(args.base_data_path + '-results')\n", (5921, 5955), False, 'import os\n'), ((3655, 3670), 'sciencebeam_gym.beam_utils.utils.PreventFusion', 'PreventFusion', ([], {}), '()\n', (3668, 3670), False, 'from sciencebeam_gym.beam_utils.utils import TransformAndCount, TransformAndLog, MapOrLog, PreventFusion\n'), ((6225, 6244), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (6242, 6244), False, 'import logging\n'), ((1793, 1821), 'sciencebeam_gym.beam_utils.io.read_all_from_path', 'read_all_from_path', (['file_url'], {}), '(file_url)\n', (1811, 1821), False, 'from sciencebeam_gym.beam_utils.io import read_all_from_path, save_file_content\n'), ((3779, 3829), 'mimetypes.guess_type', 'mimetypes.guess_type', (['d[DataProps.SOURCE_FILENAME]'], {}), '(d[DataProps.SOURCE_FILENAME])\n', (3799, 3829), False, 'import mimetypes\n')]
|
from decimal import Decimal
def test_break_test(get_contract_with_gas_estimation):
break_test = """
@public
def log(n: decimal) -> int128:
c: decimal = n * 1.0
output: int128 = 0
for i in range(400):
c = c / 1.2589
if c < 1.0:
output = i
break
return output
"""
c = get_contract_with_gas_estimation(break_test)
assert c.log(Decimal('1')) == 0
assert c.log(Decimal('2')) == 3
assert c.log(Decimal('10')) == 10
assert c.log(Decimal('200')) == 23
print('Passed for-loop break test')
def test_break_test_2(get_contract_with_gas_estimation):
break_test_2 = """
@public
def log(n: decimal) -> int128:
c: decimal = n * 1.0
output: int128 = 0
for i in range(40):
if c < 10.0:
output = i * 10
break
c = c / 10.0
for i in range(10):
c = c / 1.2589
if c < 1.0:
output = output + i
break
return output
"""
c = get_contract_with_gas_estimation(break_test_2)
assert c.log(Decimal('1')) == 0
assert c.log(Decimal('2')) == 3
assert c.log(Decimal('10')) == 10
assert c.log(Decimal('200')) == 23
assert c.log(Decimal('4000000')) == 66
print('Passed for-loop break test 2')
def test_break_test_3(get_contract_with_gas_estimation):
break_test_3 = """
@public
def log(n: int128) -> int128:
c: decimal = convert(n, decimal)
output: int128 = 0
for i in range(40):
if c < 10.0:
output = i * 10
break
c /= 10.0
for i in range(10):
c /= 1.2589
if c < 1.0:
output = output + i
break
return output
"""
c = get_contract_with_gas_estimation(break_test_3)
assert c.log(1) == 0
assert c.log(2) == 3
assert c.log(10) == 10
assert c.log(200) == 23
assert c.log(4000000) == 66
print('Passed aug-assignment break composite test')
|
[
"decimal.Decimal"
] |
[((400, 412), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (407, 412), False, 'from decimal import Decimal\n'), ((436, 448), 'decimal.Decimal', 'Decimal', (['"""2"""'], {}), "('2')\n", (443, 448), False, 'from decimal import Decimal\n'), ((472, 485), 'decimal.Decimal', 'Decimal', (['"""10"""'], {}), "('10')\n", (479, 485), False, 'from decimal import Decimal\n'), ((510, 524), 'decimal.Decimal', 'Decimal', (['"""200"""'], {}), "('200')\n", (517, 524), False, 'from decimal import Decimal\n'), ((1070, 1082), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (1077, 1082), False, 'from decimal import Decimal\n'), ((1106, 1118), 'decimal.Decimal', 'Decimal', (['"""2"""'], {}), "('2')\n", (1113, 1118), False, 'from decimal import Decimal\n'), ((1142, 1155), 'decimal.Decimal', 'Decimal', (['"""10"""'], {}), "('10')\n", (1149, 1155), False, 'from decimal import Decimal\n'), ((1180, 1194), 'decimal.Decimal', 'Decimal', (['"""200"""'], {}), "('200')\n", (1187, 1194), False, 'from decimal import Decimal\n'), ((1219, 1237), 'decimal.Decimal', 'Decimal', (['"""4000000"""'], {}), "('4000000')\n", (1226, 1237), False, 'from decimal import Decimal\n')]
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2019 Intel Corporation
import os
import io
import json
import shutil
import random
import tempfile
import contextlib
from pathlib import Path
from unittest.mock import patch
from typing import List, AsyncIterator, Dict
from dffml.record import Record
from dffml.feature import Feature, Features
from dffml.source.source import Sources, SourcesContext
from dffml.source.file import FileSourceConfig
from dffml.source.json import JSONSource
from dffml.source.csv import CSVSource, CSVSourceConfig
from dffml.model.model import ModelContext, Model
from dffml.model.accuracy import Accuracy as AccuracyType
from dffml.util.entrypoint import entrypoint
from dffml.util.asynctestcase import (
    AsyncTestCase,
non_existant_tempfile,
)
from dffml.base import config
from dffml.df.base import op
from dffml.cli.cli import Merge
from dffml.cli.ml import Train, Accuracy, Predict
from dffml.cli.list import List
from dffml.cli.dataflow import Dataflow
from .test_df import OPERATIONS, OPIMPS
from dffml import op, DataFlow, Definition
class RecordsTestCase(AsyncTestCase):
async def setUp(self):
await super().setUp()
self.records = [
Record(str(random.random()), data={"features": {"fake": 1}},)
for _ in range(0, 10)
]
self.temp_filename = self.mktempfile()
self.sconfig = FileSourceConfig(
filename=self.temp_filename, readwrite=True, allowempty=True
)
async with JSONSource(self.sconfig) as source:
async with source() as sctx:
for record in self.records:
await sctx.update(record)
contents = json.loads(Path(self.sconfig.filename).read_text())
# Ensure there are records in the file
self.assertEqual(
len(contents.get(self.sconfig.tag)),
len(self.records),
"RecordsTestCase JSON file erroneously initialized as empty",
)
# TODO(p3) For some reason patching Model.load doesn't work
self._stack.enter_context(
patch("dffml.model.model.Model.load", new=model_load)
)
self._stack.enter_context(
patch("dffml.df.base.OperationImplementation.load", new=opimp_load)
)
self._stack.enter_context(
patch("dffml.df.types.Operation.load", new=op_load)
)
@config
class FakeConfig:
features: Features
predict: Feature
location: str = os.path.join(
os.path.expanduser("~"), ".cache", "dffml", "test_cli", "fake"
)
class FakeModelContext(ModelContext):
async def train(self, sources: Sources):
pass
async def predict(self, sources: SourcesContext) -> AsyncIterator[Record]:
target = self.parent.config.predict.name
async for record in sources.with_features(
self.parent.config.features.names()
):
record.predicted(target, random.random(), float(record.key))
yield record
@entrypoint("fake")
class FakeModel(Model):
CONTEXT = FakeModelContext
CONFIG = FakeConfig
def model_load(loading):
if loading == "fake":
return FakeModel
return [FakeModel]
def op_load(loading):
return list(filter(lambda op: loading == op.name, OPERATIONS))[0]
def opimp_load(loading=None):
if loading is not None:
return list(filter(lambda imp: loading == imp.op.name, OPIMPS))[0]
return OPIMPS
class TestMerge(RecordsTestCase):
async def test_json_tag(self):
await Merge.cli(
"src=json",
"dest=json",
"-source-src-filename",
self.temp_filename,
"-source-dest-filename",
self.temp_filename,
"-source-dest-tag",
"sometag",
"-source-src-allowempty",
"-source-dest-allowempty",
"-source-src-readwrite",
"-source-dest-readwrite",
)
# Check the untagged source
with self.subTest(tagged=None):
async with JSONSource(
FileSourceConfig(filename=self.temp_filename)
) as source:
async with source() as sctx:
records = [record async for record in sctx.records()]
self.assertEqual(len(records), len(self.records))
# Check the tagged source
with self.subTest(tagged="sometag"):
async with JSONSource(
FileSourceConfig(filename=self.temp_filename, tag="sometag")
) as source:
async with source() as sctx:
records = [record async for record in sctx.records()]
self.assertEqual(len(records), len(self.records))
async def test_json_to_csv(self):
with non_existant_tempfile() as csv_tempfile:
await Merge.cli(
"src=json",
"dest=csv",
"-source-src-filename",
self.temp_filename,
"-source-dest-filename",
csv_tempfile,
"-source-dest-key",
"key",
"-source-src-allowempty",
"-source-dest-allowempty",
"-source-src-readwrite",
"-source-dest-readwrite",
)
contents = Path(csv_tempfile).read_text()
self.assertEqual(
contents,
"key,tag,fake\n"
+ "\n".join(
[f"{record.key},untagged,1" for record in self.records]
)
+ "\n",
"Incorrect data in csv file",
)
async def test_csv_tag(self):
with non_existant_tempfile() as csv_tempfile:
# Move the pre-populated json data to a csv source
with self.subTest(json_to_csv=True):
await Merge.cli(
"src=json",
"dest=csv",
"-source-src-filename",
self.temp_filename,
"-source-dest-filename",
csv_tempfile,
"-source-src-allowempty",
"-source-dest-allowempty",
"-source-src-readwrite",
"-source-dest-readwrite",
)
# Merge one tag to another within the same file
with self.subTest(merge_same_file=True):
await Merge.cli(
"src=csv",
"dest=csv",
"-source-src-filename",
csv_tempfile,
"-source-dest-filename",
csv_tempfile,
"-source-dest-tag",
"sometag",
"-source-src-allowempty",
"-source-dest-allowempty",
"-source-src-readwrite",
"-source-dest-readwrite",
)
contents = Path(csv_tempfile).read_text()
self.assertIn("untagged", contents)
self.assertIn("sometag", contents)
# Check the untagged source
with self.subTest(tagged=None):
async with CSVSource(
CSVSourceConfig(filename=csv_tempfile)
) as source:
async with source() as sctx:
records = [record async for record in sctx.records()]
self.assertEqual(len(records), len(self.records))
contents = Path(csv_tempfile).read_text()
self.assertIn("sometag", contents)
self.assertIn("untagged", contents)
# Check the tagged source
with self.subTest(tagged="sometag"):
async with CSVSource(
CSVSourceConfig(filename=csv_tempfile, tag="sometag")
) as source:
async with source() as sctx:
records = [record async for record in sctx.records()]
self.assertEqual(len(records), len(self.records))
contents = Path(csv_tempfile).read_text()
self.assertIn("sometag", contents)
self.assertIn("untagged", contents)
class TestListRecords(RecordsTestCase):
async def test_run(self):
result = await List.cli(
"records",
"-sources",
"primary=json",
"-source-primary-filename",
self.temp_filename,
"-source-primary-readwrite",
"true",
)
result = list(map(lambda r: r.export(), result))
result = dict(map(lambda r: (r["key"], r), result))
for record in self.records:
self.assertIn(record.key, result)
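# The four Dataflow CLI tests below share one pattern, visible in each
# test_run(): Dataflow.cli("create", ...) is invoked with stdout redirected
# into a StringIO so the generated dataflow JSON can be written to a temp
# file, and Dataflow.cli("run", ...) then executes that file over all
# records, a keyed subset, a single input string, or a list of contexts.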
class TestDataflowRunAllRecords(RecordsTestCase):
async def test_run(self):
self.record_keys = {"add 40 and 2": 42, "multiply 42 and 10": 420}
self.records = list(map(Record, self.record_keys.keys()))
os.unlink(self.temp_filename)
async with JSONSource(self.sconfig) as source:
async with source() as sctx:
for record in self.records:
await sctx.update(record)
tmpdir = tempfile.mkdtemp()
handle, dataflow_file = tempfile.mkstemp(suffix=".json", dir=tmpdir)
os.close(handle)
with open(dataflow_file, mode="w+b") as dataflow_file:
dataflow = io.StringIO()
with contextlib.redirect_stdout(dataflow):
await Dataflow.cli(
"create",
"-configloader",
"json",
*map(lambda op: op.name, OPERATIONS),
)
dataflow_file.write(dataflow.getvalue().encode())
dataflow_file.seek(0)
results = await Dataflow.cli(
"run",
"records",
"all",
"-dataflow",
dataflow_file.name,
"primary=json",
"-sources",
"primary=json",
"-source-filename",
self.temp_filename,
"-record-def",
"calc_string",
"-inputs",
'["result"]=get_single_spec',
)
results = {
result.key: result.feature("result") for result in results
}
for record in self.records:
self.assertIn(record.key, results)
self.assertEqual(
self.record_keys[record.key], results[record.key]
)
shutil.rmtree(tmpdir)
class TestDataflowRunRecordSet(RecordsTestCase):
async def test_run(self):
test_key = "multiply 42 and 10"
self.record_keys = {"add 40 and 2": 42, "multiply 42 and 10": 420}
self.records = list(map(Record, self.record_keys.keys()))
os.unlink(self.temp_filename)
async with JSONSource(self.sconfig) as source:
async with source() as sctx:
for record in self.records:
await sctx.update(record)
tmpdir = tempfile.mkdtemp()
handle, dataflow_file = tempfile.mkstemp(suffix=".json", dir=tmpdir)
os.close(handle)
with open(dataflow_file, mode="w+b") as dataflow_file:
dataflow = io.StringIO()
with contextlib.redirect_stdout(dataflow):
await Dataflow.cli(
"create",
"-configloader",
"json",
*map(lambda op: op.name, OPERATIONS),
)
dataflow_file.write(dataflow.getvalue().encode())
dataflow_file.seek(0)
results = await Dataflow.cli(
"run",
"records",
"set",
"-keys",
test_key,
"-dataflow",
dataflow_file.name,
"primary=json",
"-sources",
"primary=json",
"-source-filename",
self.temp_filename,
"-record-def",
"calc_string",
"-inputs",
'["result"]=get_single_spec',
)
self.assertEqual(len(results), 1)
self.assertEqual(
self.record_keys[test_key], results[0].feature("result")
)
shutil.rmtree(tmpdir)
class TestDataflowRunSingle(AsyncTestCase):
async def test_run(self):
tmpdir = tempfile.mkdtemp()
handle, dataflow_file = tempfile.mkstemp(suffix=".json", dir=tmpdir)
os.close(handle)
with open(dataflow_file, mode="w+b") as dataflow_file:
dataflow = io.StringIO()
with contextlib.redirect_stdout(dataflow):
await Dataflow.cli(
"create",
"-configloader",
"json",
*map(lambda op: op.name, OPERATIONS),
)
dataflow_file.write(dataflow.getvalue().encode())
dataflow_file.seek(0)
results = await Dataflow.cli(
"run",
"single",
"-dataflow",
dataflow_file.name,
"-inputs",
'["result"]=get_single_spec',
"add 40 and 2=calc_string",
)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], {"result": 42})
shutil.rmtree(tmpdir)
class TestDataflowRunContexts(AsyncTestCase):
async def test_run(self):
tmpdir = tempfile.mkdtemp()
handle, dataflow_file = tempfile.mkstemp(suffix=".json", dir=tmpdir)
os.close(handle)
with open(dataflow_file, mode="w+b") as dataflow_file:
dataflow = io.StringIO()
with contextlib.redirect_stdout(dataflow):
await Dataflow.cli(
"create",
"-configloader",
"json",
*map(lambda op: op.name, OPERATIONS),
)
dataflow_file.write(dataflow.getvalue().encode())
dataflow_file.seek(0)
test_contexts = {"add 40 and 2": 42, "multiply 42 and 10": 420}
results = await Dataflow.cli(
"run",
"contexts",
"-dataflow",
dataflow_file.name,
"-context-def",
"calc_string",
"-contexts",
*test_contexts.keys(),
"-input",
'["result"]=get_single_spec',
)
self.assertCountEqual(
results,
[
{ctx_string: {"result": result}}
for ctx_string, result in test_contexts.items()
],
)
shutil.rmtree(tmpdir)
class TestTrain(RecordsTestCase):
async def test_run(self):
await Train.cli(
"-model",
"fake",
"-model-features",
"fake",
"-model-predict",
"fake",
"-sources",
"primary=json",
"-source-filename",
self.temp_filename,
)
class TestPredict(RecordsTestCase):
async def test_all(self):
results = await Predict.cli(
"all",
"-model",
"fake",
"-model-features",
"fake:float:1",
"-model-predict",
"fake",
"-sources",
"primary=json",
"-source-filename",
self.temp_filename,
)
results = {
record.key: record.prediction("fake").confidence
for record in results
}
for record in self.records:
self.assertEqual(float(record.key), results[record.key])
async def test_record(self):
subset = self.records[: (int(len(self.records) / 2))]
subset_urls = list(map(lambda record: record.key, subset))
results = await Predict.cli(
"record",
"-model",
"fake",
"-model-predict",
"fake",
"-model-features",
"fake",
"-sources",
"primary=json",
"-source-filename",
self.temp_filename,
"-keys",
*subset_urls,
)
self.assertEqual(len(results), len(subset))
results = {
record.key: record.prediction("fake").confidence
for record in results
}
for record in subset:
self.assertEqual(float(record.key), results[record.key])
|
[
"os.unlink",
"dffml.cli.list.List.cli",
"pathlib.Path",
"os.close",
"dffml.cli.ml.Train.cli",
"shutil.rmtree",
"dffml.source.csv.CSVSourceConfig",
"dffml.util.entrypoint.entrypoint",
"dffml.source.json.JSONSource",
"tempfile.mkdtemp",
"dffml.cli.cli.Merge.cli",
"dffml.util.asynctestcase.non_existant_tempfile",
"io.StringIO",
"unittest.mock.patch",
"random.random",
"contextlib.redirect_stdout",
"dffml.source.file.FileSourceConfig",
"dffml.cli.dataflow.Dataflow.cli",
"tempfile.mkstemp",
"dffml.cli.ml.Predict.cli",
"os.path.expanduser"
] |
[((3039, 3057), 'dffml.util.entrypoint.entrypoint', 'entrypoint', (['"""fake"""'], {}), "('fake')\n", (3049, 3057), False, 'from dffml.util.entrypoint import entrypoint\n'), ((1410, 1488), 'dffml.source.file.FileSourceConfig', 'FileSourceConfig', ([], {'filename': 'self.temp_filename', 'readwrite': '(True)', 'allowempty': '(True)'}), '(filename=self.temp_filename, readwrite=True, allowempty=True)\n', (1426, 1488), False, 'from dffml.source.file import FileSourceConfig\n'), ((2532, 2555), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2550, 2555), False, 'import os\n'), ((9035, 9064), 'os.unlink', 'os.unlink', (['self.temp_filename'], {}), '(self.temp_filename)\n', (9044, 9064), False, 'import os\n'), ((9268, 9286), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (9284, 9286), False, 'import tempfile\n'), ((9319, 9363), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".json"""', 'dir': 'tmpdir'}), "(suffix='.json', dir=tmpdir)\n", (9335, 9363), False, 'import tempfile\n'), ((9372, 9388), 'os.close', 'os.close', (['handle'], {}), '(handle)\n', (9380, 9388), False, 'import os\n'), ((10674, 10695), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (10687, 10695), False, 'import shutil\n'), ((10966, 10995), 'os.unlink', 'os.unlink', (['self.temp_filename'], {}), '(self.temp_filename)\n', (10975, 10995), False, 'import os\n'), ((11199, 11217), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (11215, 11217), False, 'import tempfile\n'), ((11250, 11294), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".json"""', 'dir': 'tmpdir'}), "(suffix='.json', dir=tmpdir)\n", (11266, 11294), False, 'import tempfile\n'), ((11303, 11319), 'os.close', 'os.close', (['handle'], {}), '(handle)\n', (11311, 11319), False, 'import os\n'), ((12493, 12514), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (12506, 12514), False, 'import shutil\n'), ((12608, 12626), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (12624, 12626), False, 'import tempfile\n'), ((12659, 12703), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".json"""', 'dir': 'tmpdir'}), "(suffix='.json', dir=tmpdir)\n", (12675, 12703), False, 'import tempfile\n'), ((12712, 12728), 'os.close', 'os.close', (['handle'], {}), '(handle)\n', (12720, 12728), False, 'import os\n'), ((13585, 13606), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (13598, 13606), False, 'import shutil\n'), ((13702, 13720), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (13718, 13720), False, 'import tempfile\n'), ((13753, 13797), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".json"""', 'dir': 'tmpdir'}), "(suffix='.json', dir=tmpdir)\n", (13769, 13797), False, 'import tempfile\n'), ((13806, 13822), 'os.close', 'os.close', (['handle'], {}), '(handle)\n', (13814, 13822), False, 'import os\n'), ((14973, 14994), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (14986, 14994), False, 'import shutil\n'), ((1530, 1554), 'dffml.source.json.JSONSource', 'JSONSource', (['self.sconfig'], {}), '(self.sconfig)\n', (1540, 1554), False, 'from dffml.source.json import JSONSource\n'), ((2120, 2173), 'unittest.mock.patch', 'patch', (['"""dffml.model.model.Model.load"""'], {'new': 'model_load'}), "('dffml.model.model.Model.load', new=model_load)\n", (2125, 2173), False, 'from unittest.mock import patch\n'), ((2231, 2298), 'unittest.mock.patch', 'patch', (['"""dffml.df.base.OperationImplementation.load"""'], {'new': 
'opimp_load'}), "('dffml.df.base.OperationImplementation.load', new=opimp_load)\n", (2236, 2298), False, 'from unittest.mock import patch\n'), ((2356, 2407), 'unittest.mock.patch', 'patch', (['"""dffml.df.types.Operation.load"""'], {'new': 'op_load'}), "('dffml.df.types.Operation.load', new=op_load)\n", (2361, 2407), False, 'from unittest.mock import patch\n'), ((3571, 3846), 'dffml.cli.cli.Merge.cli', 'Merge.cli', (['"""src=json"""', '"""dest=json"""', '"""-source-src-filename"""', 'self.temp_filename', '"""-source-dest-filename"""', 'self.temp_filename', '"""-source-dest-tag"""', '"""sometag"""', '"""-source-src-allowempty"""', '"""-source-dest-allowempty"""', '"""-source-src-readwrite"""', '"""-source-dest-readwrite"""'], {}), "('src=json', 'dest=json', '-source-src-filename', self.\n temp_filename, '-source-dest-filename', self.temp_filename,\n '-source-dest-tag', 'sometag', '-source-src-allowempty',\n '-source-dest-allowempty', '-source-src-readwrite',\n '-source-dest-readwrite')\n", (3580, 3846), False, 'from dffml.cli.cli import Merge\n'), ((4829, 4852), 'dffml.util.asynctestcase.non_existant_tempfile', 'non_existant_tempfile', ([], {}), '()\n', (4850, 4852), False, 'from dffml.util.asynctestcase import AsyncTestCase, AsyncTestCase, non_existant_tempfile\n'), ((5741, 5764), 'dffml.util.asynctestcase.non_existant_tempfile', 'non_existant_tempfile', ([], {}), '()\n', (5762, 5764), False, 'from dffml.util.asynctestcase import AsyncTestCase, AsyncTestCase, non_existant_tempfile\n'), ((8377, 8513), 'dffml.cli.list.List.cli', 'List.cli', (['"""records"""', '"""-sources"""', '"""primary=json"""', '"""-source-primary-filename"""', 'self.temp_filename', '"""-source-primary-readwrite"""', '"""true"""'], {}), "('records', '-sources', 'primary=json', '-source-primary-filename',\n self.temp_filename, '-source-primary-readwrite', 'true')\n", (8385, 8513), False, 'from dffml.cli.list import List\n'), ((9084, 9108), 'dffml.source.json.JSONSource', 'JSONSource', (['self.sconfig'], {}), '(self.sconfig)\n', (9094, 9108), False, 'from dffml.source.json import JSONSource\n'), ((9475, 9488), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (9486, 9488), False, 'import io\n'), ((11015, 11039), 'dffml.source.json.JSONSource', 'JSONSource', (['self.sconfig'], {}), '(self.sconfig)\n', (11025, 11039), False, 'from dffml.source.json import JSONSource\n'), ((11406, 11419), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (11417, 11419), False, 'import io\n'), ((12815, 12828), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (12826, 12828), False, 'import io\n'), ((13910, 13923), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (13921, 13923), False, 'import io\n'), ((15075, 15227), 'dffml.cli.ml.Train.cli', 'Train.cli', (['"""-model"""', '"""fake"""', '"""-model-features"""', '"""fake"""', '"""-model-predict"""', '"""fake"""', '"""-sources"""', '"""primary=json"""', '"""-source-filename"""', 'self.temp_filename'], {}), "('-model', 'fake', '-model-features', 'fake', '-model-predict',\n 'fake', '-sources', 'primary=json', '-source-filename', self.temp_filename)\n", (15084, 15227), False, 'from dffml.cli.ml import Train, Accuracy, Predict\n'), ((15447, 15620), 'dffml.cli.ml.Predict.cli', 'Predict.cli', (['"""all"""', '"""-model"""', '"""fake"""', '"""-model-features"""', '"""fake:float:1"""', '"""-model-predict"""', '"""fake"""', '"""-sources"""', '"""primary=json"""', '"""-source-filename"""', 'self.temp_filename'], {}), "('all', '-model', 'fake', '-model-features', 'fake:float:1',\n '-model-predict', 'fake', 
'-sources', 'primary=json',\n '-source-filename', self.temp_filename)\n", (15458, 15620), False, 'from dffml.cli.ml import Train, Accuracy, Predict\n'), ((16173, 16364), 'dffml.cli.ml.Predict.cli', 'Predict.cli', (['"""record"""', '"""-model"""', '"""fake"""', '"""-model-predict"""', '"""fake"""', '"""-model-features"""', '"""fake"""', '"""-sources"""', '"""primary=json"""', '"""-source-filename"""', 'self.temp_filename', '"""-keys"""', '*subset_urls'], {}), "('record', '-model', 'fake', '-model-predict', 'fake',\n '-model-features', 'fake', '-sources', 'primary=json',\n '-source-filename', self.temp_filename, '-keys', *subset_urls)\n", (16184, 16364), False, 'from dffml.cli.ml import Train, Accuracy, Predict\n'), ((2975, 2990), 'random.random', 'random.random', ([], {}), '()\n', (2988, 2990), False, 'import random\n'), ((4888, 5152), 'dffml.cli.cli.Merge.cli', 'Merge.cli', (['"""src=json"""', '"""dest=csv"""', '"""-source-src-filename"""', 'self.temp_filename', '"""-source-dest-filename"""', 'csv_tempfile', '"""-source-dest-key"""', '"""key"""', '"""-source-src-allowempty"""', '"""-source-dest-allowempty"""', '"""-source-src-readwrite"""', '"""-source-dest-readwrite"""'], {}), "('src=json', 'dest=csv', '-source-src-filename', self.\n temp_filename, '-source-dest-filename', csv_tempfile,\n '-source-dest-key', 'key', '-source-src-allowempty',\n '-source-dest-allowempty', '-source-src-readwrite',\n '-source-dest-readwrite')\n", (4897, 5152), False, 'from dffml.cli.cli import Merge\n'), ((9506, 9542), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['dataflow'], {}), '(dataflow)\n', (9532, 9542), False, 'import contextlib\n'), ((9875, 10113), 'dffml.cli.dataflow.Dataflow.cli', 'Dataflow.cli', (['"""run"""', '"""records"""', '"""all"""', '"""-dataflow"""', 'dataflow_file.name', '"""primary=json"""', '"""-sources"""', '"""primary=json"""', '"""-source-filename"""', 'self.temp_filename', '"""-record-def"""', '"""calc_string"""', '"""-inputs"""', '"""["result"]=get_single_spec"""'], {}), '(\'run\', \'records\', \'all\', \'-dataflow\', dataflow_file.name,\n \'primary=json\', \'-sources\', \'primary=json\', \'-source-filename\', self.\n temp_filename, \'-record-def\', \'calc_string\', \'-inputs\',\n \'["result"]=get_single_spec\')\n', (9887, 10113), False, 'from dffml.cli.dataflow import Dataflow\n'), ((11437, 11473), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['dataflow'], {}), '(dataflow)\n', (11463, 11473), False, 'import contextlib\n'), ((11806, 12062), 'dffml.cli.dataflow.Dataflow.cli', 'Dataflow.cli', (['"""run"""', '"""records"""', '"""set"""', '"""-keys"""', 'test_key', '"""-dataflow"""', 'dataflow_file.name', '"""primary=json"""', '"""-sources"""', '"""primary=json"""', '"""-source-filename"""', 'self.temp_filename', '"""-record-def"""', '"""calc_string"""', '"""-inputs"""', '"""["result"]=get_single_spec"""'], {}), '(\'run\', \'records\', \'set\', \'-keys\', test_key, \'-dataflow\',\n dataflow_file.name, \'primary=json\', \'-sources\', \'primary=json\',\n \'-source-filename\', self.temp_filename, \'-record-def\', \'calc_string\',\n \'-inputs\', \'["result"]=get_single_spec\')\n', (11818, 12062), False, 'from dffml.cli.dataflow import Dataflow\n'), ((12846, 12882), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['dataflow'], {}), '(dataflow)\n', (12872, 12882), False, 'import contextlib\n'), ((13215, 13350), 'dffml.cli.dataflow.Dataflow.cli', 'Dataflow.cli', (['"""run"""', '"""single"""', '"""-dataflow"""', 'dataflow_file.name', '"""-inputs"""', 
'"""["result"]=get_single_spec"""', '"""add 40 and 2=calc_string"""'], {}), '(\'run\', \'single\', \'-dataflow\', dataflow_file.name, \'-inputs\',\n \'["result"]=get_single_spec\', \'add 40 and 2=calc_string\')\n', (13227, 13350), False, 'from dffml.cli.dataflow import Dataflow\n'), ((13941, 13977), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['dataflow'], {}), '(dataflow)\n', (13967, 13977), False, 'import contextlib\n'), ((1245, 1260), 'random.random', 'random.random', ([], {}), '()\n', (1258, 1260), False, 'import random\n'), ((1727, 1754), 'pathlib.Path', 'Path', (['self.sconfig.filename'], {}), '(self.sconfig.filename)\n', (1731, 1754), False, 'from pathlib import Path\n'), ((4112, 4157), 'dffml.source.file.FileSourceConfig', 'FileSourceConfig', ([], {'filename': 'self.temp_filename'}), '(filename=self.temp_filename)\n', (4128, 4157), False, 'from dffml.source.file import FileSourceConfig\n'), ((4502, 4562), 'dffml.source.file.FileSourceConfig', 'FileSourceConfig', ([], {'filename': 'self.temp_filename', 'tag': '"""sometag"""'}), "(filename=self.temp_filename, tag='sometag')\n", (4518, 4562), False, 'from dffml.source.file import FileSourceConfig\n'), ((5366, 5384), 'pathlib.Path', 'Path', (['csv_tempfile'], {}), '(csv_tempfile)\n', (5370, 5384), False, 'from pathlib import Path\n'), ((5916, 6149), 'dffml.cli.cli.Merge.cli', 'Merge.cli', (['"""src=json"""', '"""dest=csv"""', '"""-source-src-filename"""', 'self.temp_filename', '"""-source-dest-filename"""', 'csv_tempfile', '"""-source-src-allowempty"""', '"""-source-dest-allowempty"""', '"""-source-src-readwrite"""', '"""-source-dest-readwrite"""'], {}), "('src=json', 'dest=csv', '-source-src-filename', self.\n temp_filename, '-source-dest-filename', csv_tempfile,\n '-source-src-allowempty', '-source-dest-allowempty',\n '-source-src-readwrite', '-source-dest-readwrite')\n", (5925, 6149), False, 'from dffml.cli.cli import Merge\n'), ((6491, 6747), 'dffml.cli.cli.Merge.cli', 'Merge.cli', (['"""src=csv"""', '"""dest=csv"""', '"""-source-src-filename"""', 'csv_tempfile', '"""-source-dest-filename"""', 'csv_tempfile', '"""-source-dest-tag"""', '"""sometag"""', '"""-source-src-allowempty"""', '"""-source-dest-allowempty"""', '"""-source-src-readwrite"""', '"""-source-dest-readwrite"""'], {}), "('src=csv', 'dest=csv', '-source-src-filename', csv_tempfile,\n '-source-dest-filename', csv_tempfile, '-source-dest-tag', 'sometag',\n '-source-src-allowempty', '-source-dest-allowempty',\n '-source-src-readwrite', '-source-dest-readwrite')\n", (6500, 6747), False, 'from dffml.cli.cli import Merge\n'), ((7018, 7036), 'pathlib.Path', 'Path', (['csv_tempfile'], {}), '(csv_tempfile)\n', (7022, 7036), False, 'from pathlib import Path\n'), ((7578, 7596), 'pathlib.Path', 'Path', (['csv_tempfile'], {}), '(csv_tempfile)\n', (7582, 7596), False, 'from pathlib import Path\n'), ((8156, 8174), 'pathlib.Path', 'Path', (['csv_tempfile'], {}), '(csv_tempfile)\n', (8160, 8174), False, 'from pathlib import Path\n'), ((7286, 7324), 'dffml.source.csv.CSVSourceConfig', 'CSVSourceConfig', ([], {'filename': 'csv_tempfile'}), '(filename=csv_tempfile)\n', (7301, 7324), False, 'from dffml.source.csv import CSVSource, CSVSourceConfig\n'), ((7849, 7902), 'dffml.source.csv.CSVSourceConfig', 'CSVSourceConfig', ([], {'filename': 'csv_tempfile', 'tag': '"""sometag"""'}), "(filename=csv_tempfile, tag='sometag')\n", (7864, 7902), False, 'from dffml.source.csv import CSVSource, CSVSourceConfig\n')]
|
__package__ = 'archivebox.extractors'
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Optional
import json
from ..index.schema import Link, ArchiveResult, ArchiveError
from ..system import run, atomic_write
from ..util import (
enforce_types,
download_url,
is_static_file,
)
from ..config import (
TIMEOUT,
CURL_BINARY,
SAVE_READABILITY,
DEPENDENCIES,
READABILITY_VERSION,
)
from ..logging_util import TimedProgress
@enforce_types
def get_html(link: Link, path: Path) -> str:
"""
    Try to find singlefile, wget and then dom files.
If none is found, download the url again.
"""
canonical = link.canonical_outputs()
abs_path = path.absolute()
sources = [canonical["singlefile_path"], canonical["wget_path"], canonical["dom_path"]]
document = None
for source in sources:
try:
with open(abs_path / source, "r", encoding="utf-8") as f:
document = f.read()
break
except (FileNotFoundError, TypeError):
continue
if document is None:
return download_url(link.url)
else:
return document
@enforce_types
def should_save_readability(link: Link, out_dir: Optional[str]=None, overwrite: Optional[bool]=False) -> bool:
if is_static_file(link.url):
return False
out_dir = out_dir or Path(link.link_dir)
if not overwrite and (out_dir / 'readability').exists():
return False
return SAVE_READABILITY
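# A rough summary of the gating above: readability is skipped for static
# files (is_static_file on the URL), skipped when a 'readability' folder
# already exists under out_dir and overwrite is falsy, and otherwise follows
# the SAVE_READABILITY config flag.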
@enforce_types
def save_readability(link: Link, out_dir: Optional[str]=None, timeout: int=TIMEOUT) -> ArchiveResult:
"""download reader friendly version using @mozilla/readability"""
out_dir = Path(out_dir or link.link_dir)
output_folder = out_dir.absolute() / "readability"
output = "readability"
# Readability Docs: https://github.com/mozilla/readability
status = 'succeeded'
# fake command to show the user so they have something to try debugging if get_html fails
cmd = [
CURL_BINARY,
link.url
]
readability_content = None
timer = TimedProgress(timeout, prefix=' ')
try:
document = get_html(link, out_dir)
temp_doc = NamedTemporaryFile(delete=False)
temp_doc.write(document.encode("utf-8"))
temp_doc.close()
if not document or len(document) < 10:
raise ArchiveError('Readability could not find HTML to parse for article text')
cmd = [
DEPENDENCIES['READABILITY_BINARY']['path'],
temp_doc.name,
]
result = run(cmd, cwd=out_dir, timeout=timeout)
try:
result_json = json.loads(result.stdout)
assert result_json and 'content' in result_json
except json.JSONDecodeError:
raise ArchiveError('Readability was not able to archive the page', result.stdout + result.stderr)
output_folder.mkdir(exist_ok=True)
readability_content = result_json.pop("textContent")
atomic_write(str(output_folder / "content.html"), result_json.pop("content"))
atomic_write(str(output_folder / "content.txt"), readability_content)
atomic_write(str(output_folder / "article.json"), result_json)
        # collect the last few non-empty lines of combined stdout/stderr to
        # use as hints if readability fails
output_tail = [
line.strip()
for line in (result.stdout + result.stderr).decode().rsplit('\n', 3)[-3:]
if line.strip()
]
hints = (
'Got readability response code: {}.'.format(result.returncode),
*output_tail,
)
# Check for common failure cases
if (result.returncode > 0):
raise ArchiveError('Readability was not able to archive the page', hints)
except (Exception, OSError) as err:
status = 'failed'
output = err
cmd = [cmd[0], './{singlefile,dom}.html']
finally:
timer.end()
return ArchiveResult(
cmd=cmd,
pwd=str(out_dir),
cmd_version=READABILITY_VERSION,
output=output,
status=status,
index_texts=[readability_content] if readability_content else [],
**timer.stats,
)
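# On success, save_readability() writes three files into the 'readability'
# folder under out_dir via atomic_write(): content.html (the extracted
# article markup), content.txt (the plain-text version, also passed back via
# index_texts), and article.json (the remaining readability metadata).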
|
[
"tempfile.NamedTemporaryFile",
"pathlib.Path",
"json.loads"
] |
[((1727, 1757), 'pathlib.Path', 'Path', (['(out_dir or link.link_dir)'], {}), '(out_dir or link.link_dir)\n', (1731, 1757), False, 'from pathlib import Path\n'), ((1392, 1411), 'pathlib.Path', 'Path', (['link.link_dir'], {}), '(link.link_dir)\n', (1396, 1411), False, 'from pathlib import Path\n'), ((2234, 2266), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (2252, 2266), False, 'from tempfile import NamedTemporaryFile\n'), ((2687, 2712), 'json.loads', 'json.loads', (['result.stdout'], {}), '(result.stdout)\n', (2697, 2712), False, 'import json\n')]
|
"""Bayesian Optimization sampler : Defined only for continuous domains.
For discrete inputs define another sampler"""
from verifai.samplers.domain_sampler import DomainSampler
import numpy as np
class BayesOptSampler(DomainSampler):
def __init__(self, domain, BO_params):
try:
import GPyOpt
except ModuleNotFoundError:
import sys
sys.exit('BayesOptSampler requires GPyOpt to be installed')
super().__init__(domain)
self.dimension = domain.standardizedDimension
if not self.dimension:
raise RuntimeError(f'{self.__class__.__name__} supports only'
' continuous standardizable Domains')
self.f = BO_params.f
self.init_num = BO_params.init_num
self.bounds = []
for i in range(self.dimension):
self.bounds.append({'name':'x_'+str(i), 'type': 'continuous',
'domain': (0,1)})
self.X = None
self.Y = None
self.BO = None
def nextSample(self):
import GPyOpt # do this here to avoid slow import when unused
if self.X is None or len(self.X) < self.init_num:
print("Doing random sampling")
sample = np.random.uniform(0,1, self.dimension)
if self.X is None:
self.X= np.atleast_2d(sample)
sample = self.domain.unstandardize(sample)
self.Y = np.atleast_2d(self.f(sample))
else:
self.X = np.vstack((self.X, np.atleast_2d(sample)))
sample = self.domain.unstandardize(sample)
self.Y = np.vstack((self.Y, np.atleast_2d(self.f(sample))))
return sample
print("Doing BO")
self.BO = GPyOpt.methods.BayesianOptimization(
f=lambda sample: self.f(self.domain.unstandardize(tuple(sample[0]))),
domain=self.bounds, X=self.X, Y=self.Y, normalize_Y=False)
self.BO.run_optimization(1)
self.X = np.vstack((self.X,np.atleast_2d(self.BO.X[-1])))
self.Y = np.vstack((self.Y, np.atleast_2d(self.BO.Y[-1])))
return self.domain.unstandardize(tuple(self.X[-1]))
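# Rough usage sketch (hypothetical names; assumes a continuous,
# standardizable domain and a BO_params object exposing f and init_num as
# accessed above):
#
#     # params = SimpleNamespace(f=cost_fn, init_num=5)
#     # sampler = BayesOptSampler(domain, params)
#     # for _ in range(20):
#     #     point = sampler.nextSample()  # first init_num samples are uniform
#     #                                   # random; later ones come from GPyOpt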
|
[
"numpy.random.uniform",
"sys.exit",
"numpy.atleast_2d"
] |
[((1261, 1300), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.dimension'], {}), '(0, 1, self.dimension)\n', (1278, 1300), True, 'import numpy as np\n'), ((390, 449), 'sys.exit', 'sys.exit', (['"""BayesOptSampler requires GPyOpt to be installed"""'], {}), "('BayesOptSampler requires GPyOpt to be installed')\n", (398, 449), False, 'import sys\n'), ((1355, 1376), 'numpy.atleast_2d', 'np.atleast_2d', (['sample'], {}), '(sample)\n', (1368, 1376), True, 'import numpy as np\n'), ((2044, 2072), 'numpy.atleast_2d', 'np.atleast_2d', (['self.BO.X[-1]'], {}), '(self.BO.X[-1])\n', (2057, 2072), True, 'import numpy as np\n'), ((2111, 2139), 'numpy.atleast_2d', 'np.atleast_2d', (['self.BO.Y[-1]'], {}), '(self.BO.Y[-1])\n', (2124, 2139), True, 'import numpy as np\n'), ((1553, 1574), 'numpy.atleast_2d', 'np.atleast_2d', (['sample'], {}), '(sample)\n', (1566, 1574), True, 'import numpy as np\n')]
|
import unittest
from get_nsu_temp import get_nsu_temp
from get_nsu_temp import message_nsu_temp
class TestStringMethods(unittest.TestCase):
def test_get_nsu_temp_is_str(self):
self.assertTrue(isinstance(get_nsu_temp()[0], str))
def test_get_nsu_temp_is_not_empty(self):
self.assertTrue(len(get_nsu_temp()[0])!=0)
def test_message_nsu_temp_has_6_words(self):
self.assertEqual(len(message_nsu_temp('-10C').split(' ')), 6)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"get_nsu_temp.get_nsu_temp",
"get_nsu_temp.message_nsu_temp"
] |
[((496, 511), 'unittest.main', 'unittest.main', ([], {}), '()\n', (509, 511), False, 'import unittest\n'), ((217, 231), 'get_nsu_temp.get_nsu_temp', 'get_nsu_temp', ([], {}), '()\n', (229, 231), False, 'from get_nsu_temp import get_nsu_temp\n'), ((321, 335), 'get_nsu_temp.get_nsu_temp', 'get_nsu_temp', ([], {}), '()\n', (333, 335), False, 'from get_nsu_temp import get_nsu_temp\n'), ((423, 447), 'get_nsu_temp.message_nsu_temp', 'message_nsu_temp', (['"""-10C"""'], {}), "('-10C')\n", (439, 447), False, 'from get_nsu_temp import message_nsu_temp\n')]
|
import torch.nn as nn
import torch.nn.functional as F
from layer import GraphConvolution
class GCN(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.gc2 = GraphConvolution(nhid, nclass)
self.dropout = dropout
def forward(self, x, adj):
x = F.relu(self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc2(x, adj)
return F.log_softmax(x, dim=1)
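# Rough usage sketch (hypothetical sizes): x is an N x nfeat node-feature
# matrix and adj an N x N (typically normalized) adjacency matrix; the
# forward pass returns N x nclass log-probabilities.
#
#     # model = GCN(nfeat=1433, nhid=16, nclass=7, dropout=0.5)  # Cora-like sizes
#     # log_probs = model(features, adj)                         # shape [N, 7]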
|
[
"torch.nn.functional.dropout",
"layer.GraphConvolution",
"torch.nn.functional.log_softmax"
] |
[((224, 253), 'layer.GraphConvolution', 'GraphConvolution', (['nfeat', 'nhid'], {}), '(nfeat, nhid)\n', (240, 253), False, 'from layer import GraphConvolution\n'), ((273, 303), 'layer.GraphConvolution', 'GraphConvolution', (['nhid', 'nclass'], {}), '(nhid, nclass)\n', (289, 303), False, 'from layer import GraphConvolution\n'), ((416, 466), 'torch.nn.functional.dropout', 'F.dropout', (['x', 'self.dropout'], {'training': 'self.training'}), '(x, self.dropout, training=self.training)\n', (425, 466), True, 'import torch.nn.functional as F\n'), ((511, 534), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (524, 534), True, 'import torch.nn.functional as F\n')]
|
# -*- coding: utf-8 -*-
from djangocms_text_ckeditor.models import Text
from cms.api import create_page, add_plugin
from cms.models import Page
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.tests.test_plugins import PluginsTestBaseCase
from cms.utils.compat.tests import UnittestCompatMixin
from cms.utils.copy_plugins import copy_plugins_to
from cms.utils.plugins import reorder_plugins
class NestedPluginsTestCase(PluginsTestBaseCase, UnittestCompatMixin):
def reorder_positions(self, plugin=None, parent=None):
if parent:
parent_id = parent.pk
plugin = parent
else:
parent_id = plugin.parent_id
x = 0
for p in CMSPlugin.objects.filter(parent_id=parent_id, language=plugin.language, placeholder_id=plugin.placeholder_id):
p.position = x
p.save()
x += 1
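    # reorder_positions() renumbers the plugins sharing the given
    # parent/language/placeholder consecutively from zero (0, 1, 2, ...);
    # the tests call it after every move() so position values stay dense
    # and consistent.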
def copy_placeholders_and_check_results(self, placeholders):
"""
        This function is not itself a test; rather, it can be used by any test
        that has created placeholders. It checks that, whatever the plugin
        structure inside each placeholder, that structure is copied accurately
        when the placeholder is copied.
        placeholders is a list of placeholders
"""
for original_placeholder in placeholders:
# get the plugins
original_plugins = original_placeholder.get_plugins()
# copy them to a new placeholder
copied_placeholder = Placeholder.objects.create(slot=original_placeholder.slot)
copy_plugins_to(
original_placeholder.get_plugins(),
copied_placeholder
)
copied_plugins = copied_placeholder.get_plugins()
# we should find the same number of plugins in both placeholders
self.assertEqual(
original_plugins.count(),
copied_plugins.count()
)
# quick check: make sure the two querysets match:
for original, copy in zip(original_plugins, copied_plugins):
self.assertEqual(
Text.objects.get(id=original.id).body,
Text.objects.get(id=copy.id).body
)
# Now build a *tree* of the plugins, and match those - it's not
# enough just to compare querysets as above; we should *also* check
# that when we build a tree, the various nodes are assembled as we
# would expect. We will pump the trees into a pair of lists:
original_plugins_list = []
copied_plugins_list = []
# This function builds the tree of plugins, starting from its roots.
# In that respect it's like many of the plugin tree-building
# routines elsewhere in the system.
def plugin_list_from_tree(roots, plugin_list):
for plugin in roots:
plugin_list.append(plugin)
# recurse over the set of nodes
plugin_list_from_tree(plugin.get_children(), plugin_list)
# build the tree for each set of plugins
plugin_list_from_tree(original_plugins.filter(depth=1), original_plugins_list)
plugin_list_from_tree(copied_plugins.filter(depth=1), copied_plugins_list)
self.assertEqual(len(original_plugins_list), original_plugins.count())
self.assertEqual(len(copied_plugins_list), copied_plugins.count())
# Check that each pair of items in the two lists match, in lots of
# different ways
for original, copy in zip(original_plugins_list, copied_plugins_list):
original_text_plugin = Text.objects.get(id=original.id)
copied_text_plugin = Text.objects.get(id=copy.id)
# This first one is a sanity test, just to prove that we aren't
# simply comparing *exactly the same items* in all these tests.
# It could happen...
self.assertNotEquals(original.id, copy.id)
self.assertEqual(
original_text_plugin.body,
copied_text_plugin.body
)
self.assertEqual(
original_text_plugin.depth,
copied_text_plugin.depth
)
self.assertEqual(
original_text_plugin.position,
copied_text_plugin.position
)
self.assertEqual(
original_text_plugin.numchild,
copied_text_plugin.numchild
)
self.assertEqual(
original_text_plugin.get_descendant_count(),
copied_text_plugin.get_descendant_count()
)
self.assertEqual(
original_text_plugin.get_ancestors().count(),
copied_text_plugin.get_ancestors().count()
)
# just in case the test method that called us wants it:
return copied_placeholder
def test_plugin_fix_tree(self):
"""
Tests CMSPlugin.fix_tree by creating a plugin structure, setting the
position value to Null for all the plugins and then rebuild the tree.
The structure below isn't arbitrary, but has been designed to test
various conditions, including:
* nodes four levels deep
* siblings with and without children
1
2
4
10
8
3
9
5
6
7
"""
placeholder = Placeholder(slot=u"some_slot")
placeholder.save() # a good idea, if not strictly necessary
# plugin in placeholder
plugin_1 = add_plugin(placeholder, u"TextPlugin", u"en", body=u"01")
# IMPORTANT: plugins must be reloaded, before they can be assigned
# as a parent. Otherwise, the Tree structure doesn't seem to rebuild
# properly.
# child of plugin_1
plugin_1 = self.reload(plugin_1)
plugin_2 = add_plugin(placeholder, u"TextPlugin", u"en", # nopyflakes
body=u"02", target=plugin_1,
)
# create a second child of plugin_1
plugin_1 = self.reload(plugin_1)
plugin_3 = add_plugin(placeholder, u"TextPlugin", u"en", # nopyflakes
body=u"03", target=plugin_1
)
# child of plugin_2
plugin_2 = self.reload(plugin_2)
plugin_4 = add_plugin(placeholder, u"TextPlugin", u"en", # nopyflakes
body=u"04", target=plugin_2
)
plugin_1 = self.reload(plugin_1) # nopyflakes
# create a second root plugin
plugin_5 = add_plugin(placeholder, u"TextPlugin", u"en", body=u"05")
left = CMSPlugin.objects.filter(parent__isnull=True).order_by('path')[0]
plugin_5 = self.reload(plugin_5)
plugin_5 = plugin_5.move(left, pos='right')
self.reorder_positions(plugin_5)
self.reorder_positions(plugin_2)
# child of plugin_5
plugin_5 = self.reload(plugin_5)
plugin_6 = add_plugin(placeholder, u"TextPlugin", u"en", # nopyflakes
body=u"06", target=plugin_5
)
# child of plugin_6
plugin_5 = self.reload(plugin_5)
plugin_7 = add_plugin(placeholder, u"TextPlugin", u"en", # nopyflakes
body=u"07", target=plugin_5
)
# another child of plugin_2
plugin_2 = self.reload(plugin_2)
plugin_8 = add_plugin(placeholder, u"TextPlugin", u"en", # nopyflakes
body=u"08", target=plugin_2
)
# child of plugin_3
plugin_3 = self.reload(plugin_3)
plugin_9 = add_plugin(placeholder, u"TextPlugin", u"en", # nopyflakes
body=u"09", target=plugin_3
)
# child of plugin_4
plugin_4 = self.reload(plugin_4)
plugin_10 = add_plugin(placeholder, u"TextPlugin", u"en", # nopyflakes
body=u"10", target=plugin_4
)
# We do two comparisons here.
# One is to compare plugin position values
# per plugin instance.
# To do this we get a dictionary mapping plugin
# ids to their respective position.
# The second comparison is to make sure that
# plugins retain their position/path ordering.
        # The reason for these comparisons
        # is an obscure behavior in postgres
# where somehow items with the same value that are
# sorted by that value will be returned in different
# order based on the orm query construction.
# By comparing ids with positions, we make sure that
# each plugin has the correct position after the fix-tree.
# See ticket #5291
plugins = (
CMSPlugin
.objects
.filter(placeholder=placeholder)
)
# Maps plugin ids to positions
original_plugin_positions = dict(
plugins
.order_by('position')
.values_list('pk', 'position')
)
# List of plugin ids sorted by position and path
original_plugin_ids = list(
plugins
.order_by('position', 'path')
.values_list('pk', flat=True)
)
# We use 1 to effectively "break" the tree
# and as a way to test that fixing trees with
# equal position values retains the correct ordering.
CMSPlugin.objects.update(position=1)
CMSPlugin.fix_tree()
new_plugin_positions = dict(
plugins
.order_by('position')
.values_list('pk', 'position')
)
new_plugin_ids = list(
plugins
.order_by('position', 'path')
.values_list('pk', flat=True)
)
self.assertDictEqual(original_plugin_positions, new_plugin_positions)
self.assertSequenceEqual(original_plugin_ids, new_plugin_ids)
# Now, check to see if the correct order is restored, even if we
# re-arrange the plugins so that their natural «pk» order is different
# than their «position» order.
# Move the 2nd top-level plugin to the "left" or before the 1st.
reorder_plugins(placeholder, None, u"en", [plugin_5.pk, plugin_1.pk])
reordered_plugins = list(placeholder.get_plugins().order_by('position', 'path'))
CMSPlugin.fix_tree()
# Now, they should NOT be in the original order at all. Are they?
new_plugins = list(placeholder.get_plugins().order_by('position', 'path'))
self.assertSequenceEqual(
reordered_plugins, new_plugins,
"Plugin order not preserved during fix_tree().")
def test_plugin_deep_nesting_and_copying(self):
"""
Create a deeply-nested plugin structure, tests its properties, and tests
that it is copied accurately when the placeholder containing them is
copied.
The structure below isn't arbitrary, but has been designed to test
various conditions, including:
* nodes four levels deep
* multiple successive level increases
* multiple successive level decreases
* successive nodes on the same level followed by level changes
* multiple level decreases between successive nodes
* siblings with and without children
* nodes and branches added to the tree out of sequence
First we create the structure:
11
1
2
12
4
10
8
3
9
5
6
7
13
14
and then we move it all around.
"""
placeholder = Placeholder(slot=u"some_slot")
placeholder.save() # a good idea, if not strictly necessary
# plugin in placeholder
plugin_1 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"01")
# IMPORTANT: plugins must be reloaded, before they can be assigned
# as a parent. Otherwise, the MPTT structure doesn't seem to rebuild
# properly.
# child of plugin_1
plugin_1 = self.reload(plugin_1)
plugin_2 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"02", target=plugin_1,
)
# plugin_2 should be plugin_1's only child
# for a single item we use assertSequenceEqual
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_1.pk).get_children(),
[CMSPlugin.objects.get(id=plugin_2.pk)])
# create a second child of plugin_1
plugin_1 = self.reload(plugin_1)
plugin_3 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"03", target=plugin_1
)
# plugin_2 & plugin_3 should be plugin_1's children
# for multiple items we use assertSequenceEqual, because
# assertSequenceEqual may re-order the list without warning
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_1.pk).get_children(),
[
CMSPlugin.objects.get(id=plugin_2.pk),
CMSPlugin.objects.get(id=plugin_3.pk),
])
# child of plugin_2
plugin_2 = self.reload(plugin_2)
plugin_4 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"04", target=plugin_2
)
# plugin_4 should be plugin_2's child
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_2.pk).get_children(),
[CMSPlugin.objects.get(id=plugin_4.pk)])
# 2,3 & 4 should be descendants of 1
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_1.pk).get_descendants(),
[
                # note: materialized-path (MP) ordering is reflected here:
CMSPlugin.objects.get(id=plugin_2.pk),
CMSPlugin.objects.get(id=plugin_4.pk),
CMSPlugin.objects.get(id=plugin_3.pk),
],
)
plugin_1 = self.reload(plugin_1)
# create a second root plugin
plugin_5 = add_plugin(placeholder, u"TextPlugin", u"en", body=u"05")
left = CMSPlugin.objects.filter(parent__isnull=True).order_by('path')[0]
plugin_5 = self.reload(plugin_5)
plugin_5 = plugin_5.move(left, pos='right')
self.reorder_positions(plugin_5)
self.reorder_positions(plugin_2)
# child of plugin_5
plugin_5 = self.reload(plugin_5)
plugin_6 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"06", target=plugin_5
)
# plugin_6 should be plugin_5's child
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_5.pk).get_children(),
[CMSPlugin.objects.get(id=plugin_6.pk)])
# child of plugin_6
plugin_5 = self.reload(plugin_5)
plugin_7 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"07", target=plugin_5
)
# plugin_7 should be plugin_5's child
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_5.pk).get_children(),
[
CMSPlugin.objects.get(id=plugin_6.pk),
CMSPlugin.objects.get(id=plugin_7.pk)
])
# 6 & 7 should be descendants of 5
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_5.pk).get_descendants(),
[
CMSPlugin.objects.get(id=plugin_6.pk),
CMSPlugin.objects.get(id=plugin_7.pk),
])
# another child of plugin_2
plugin_2 = self.reload(plugin_2)
plugin_8 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"08", target=plugin_2
)
# plugin_4 should be plugin_2's child
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_2.pk).get_children(),
[
CMSPlugin.objects.get(id=plugin_4.pk),
CMSPlugin.objects.get(id=plugin_8.pk),
])
# child of plugin_3
plugin_3 = self.reload(plugin_3)
plugin_9 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"09", target=plugin_3
)
# plugin_9 should be plugin_3's child
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_3.pk).get_children(),
[CMSPlugin.objects.get(id=plugin_9.pk)])
# child of plugin_4
plugin_4 = self.reload(plugin_4)
plugin_10 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"10", target=plugin_4
)
# plugin_10 should be plugin_4's child
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_4.pk).get_children(),
[CMSPlugin.objects.get(id=plugin_10.pk)])
original_plugins = placeholder.get_plugins()
self.assertEqual(original_plugins.count(), 10)
# elder sibling of plugin_1
plugin_1 = self.reload(plugin_1)
plugin_11 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"11",
target=plugin_1,
position="left"
)
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_1.pk).get_children(),
[
CMSPlugin.objects.get(id=plugin_2.pk),
CMSPlugin.objects.get(id=plugin_3.pk)
])
# elder sibling of plugin_4
plugin_4 = self.reload(plugin_4)
plugin_12 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"12",
target=plugin_4,
position="left"
)
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_2.pk).get_children(),
[
CMSPlugin.objects.get(id=plugin_12.pk),
CMSPlugin.objects.get(id=plugin_4.pk),
CMSPlugin.objects.get(id=plugin_8.pk)
])
# younger sibling of plugin_7
plugin_7 = self.reload(plugin_7)
plugin_13 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"13",
target=plugin_7,
position="right"
)
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_5.pk).get_children(),
[
CMSPlugin.objects.get(id=plugin_6.pk),
CMSPlugin.objects.get(id=plugin_7.pk),
CMSPlugin.objects.get(id=plugin_13.pk)
])
# new sibling of plugin_5
plugin_5 = self.reload(plugin_5)
plugin_14 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"14"
)
self.assertSequenceEqual(
CMSPlugin.objects.filter(depth=1).order_by('path'),
[
CMSPlugin.objects.get(id=plugin_11.pk),
CMSPlugin.objects.get(id=plugin_1.pk),
CMSPlugin.objects.get(id=plugin_5.pk),
CMSPlugin.objects.get(id=plugin_14.pk)
])
self.copy_placeholders_and_check_results([placeholder])
# now let's move plugins around in the tree
# move plugin_2 before plugin_11
plugin_2 = self.reload(plugin_2)
plugin_1 = self.reload(plugin_1)
old_parent = plugin_2.parent
plugin_2.parent_id = plugin_1.parent_id
plugin_2.save()
plugin_2 = plugin_2.move(target=plugin_1, pos="left")
self.reorder_positions(parent=old_parent)
self.reorder_positions(plugin_2)
self.copy_placeholders_and_check_results([placeholder])
# move plugin_6 after plugin_7
plugin_6 = self.reload(plugin_6)
plugin_7 = self.reload(plugin_7)
old_parent = plugin_6.parent
plugin_6.parent_id = plugin_7.parent_id
plugin_6.save()
plugin_6 = plugin_6.move(target=plugin_7, pos="right")
self.reorder_positions(parent=old_parent)
self.reorder_positions(plugin_6)
self.copy_placeholders_and_check_results([placeholder])
# move plugin_3 before plugin_2
plugin_2 = self.reload(plugin_2)
plugin_3 = self.reload(plugin_3)
old_parent = plugin_3.parent
plugin_3.parent_id = plugin_2.parent_id
plugin_3.save()
plugin_3 = plugin_3.move(target=plugin_2, pos="left")
self.reorder_positions(parent=old_parent)
self.reorder_positions(plugin_3)
self.copy_placeholders_and_check_results([placeholder])
# make plugin_3 plugin_2's first-child
plugin_2 = self.reload(plugin_2)
plugin_3 = self.reload(plugin_3)
old_parent = plugin_3.parent
plugin_3.parent_id = plugin_2.pk
plugin_3.save()
plugin_3 = plugin_3.move(target=plugin_2, pos="first-child")
self.reorder_positions(CMSPlugin.objects.filter(placeholder_id=plugin_3.placeholder_id, language=plugin_3.language, depth=1)[0])
self.reorder_positions(plugin_3)
self.copy_placeholders_and_check_results([placeholder])
# make plugin_7 plugin_2's first-child
plugin_3 = self.reload(plugin_3)
plugin_7 = self.reload(plugin_7)
old_parent = plugin_7.parent
plugin_7.parent_id = plugin_3.parent_id
plugin_7.save()
plugin_7 = plugin_7.move(target=plugin_3, pos="right")
self.reorder_positions(parent=old_parent)
self.reorder_positions(plugin_7)
self.copy_placeholders_and_check_results([placeholder, ])
def test_nested_plugin_on_page(self):
"""
        Validate that for a text plugin with a nested link plugin the
        mptt values correctly show a parent-child relationship
        for the nested plugin
"""
with self.settings(CMS_PERMISSION=False):
# setup page 1
page_one = create_page(u"Three Placeholder", u"col_three.html", u"en",
position=u"last-child", published=True, in_navigation=True)
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
# add a plugin
pre_nesting_body = u"<p>the nested text plugin with a link inside</p>"
text_plugin = add_plugin(page_one_ph_two, u"TextPlugin", u"en", body=pre_nesting_body)
            # prepare nesting plugin
page_one_ph_two = self.reload(page_one_ph_two)
text_plugin = self.reload(text_plugin)
link_plugin = add_plugin(page_one_ph_two, u"LinkPlugin", u"en", target=text_plugin)
link_plugin.name = u"django-cms Link"
link_plugin.external_link = u"https://www.django-cms.org"
            # for some reason mptt does not update the parent-child
            # relationship in the add_plugin method when a target is
            # present, but this is not the topic of the test
link_plugin.parent = text_plugin
link_plugin.save()
# reloading needs to be done after every save
link_plugin = self.reload(link_plugin)
text_plugin = self.reload(text_plugin)
# mptt related insertion correct?
msg = u"parent plugin right is not updated, child not inserted correctly"
self.assertTrue(text_plugin.position == link_plugin.position, msg=msg)
msg = u"link has no parent"
self.assertFalse(link_plugin.parent is None, msg=msg)
msg = u"parent plugin path is not updated, child not inserted correctly"
self.assertTrue(text_plugin.path == link_plugin.path[:4], msg=msg)
msg = u"child level is not bigger than parent level"
self.assertTrue(text_plugin.depth < link_plugin.depth, msg=msg)
# add the link plugin to the body
# emulate the editor in admin that adds some txt for the nested plugin
in_txt = u"""<img id="plugin_obj_%s" title="Link" alt="Link" src="/static/cms/img/icons/plugins/link.png">"""
nesting_body = u"%s<p>%s</p>" % (text_plugin.body, (in_txt % (link_plugin.id)))
text_plugin.body = nesting_body
text_plugin.save()
text_plugin = self.reload(text_plugin)
# none of the descendants should have a placeholder other then my own one
self.assertEqual(text_plugin.get_descendants().exclude(placeholder=text_plugin.placeholder).count(), 0)
post_add_plugin_count = CMSPlugin.objects.count()
self.assertEqual(post_add_plugin_count, 2)
def test_copy_page_nested_plugin(self):
"""
Test to verify that page copy with a nested plugin works
page one - 3 placeholder
col_sidebar: 1 text plugin
col_left: 1 text plugin with nested link plugin
col_right: no plugin
page two (copy target)
Verify copied page, placeholders, plugins and body text
"""
with self.settings(CMS_PERMISSION=False):
# setup page 1
page_one = create_page(u"Three Placeholder", u"col_three.html", u"en",
position=u"last-child", published=True, in_navigation=True)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
page_one.placeholders.get(slot=u"col_right")
# add the text plugin to placeholder one
text_plugin_en = add_plugin(page_one_ph_one, u"TextPlugin", u"en", body="Hello World")
self.assertEqual(text_plugin_en.id, CMSPlugin.objects.all()[0].id)
self.assertEqual(text_plugin_en.get_children().count(), 0)
pre_add_plugin_count = CMSPlugin.objects.count()
self.assertEqual(pre_add_plugin_count, 1)
###
# add a plugin to placeholder two
###
pre_nesting_body = u"<p>the nested text plugin with a link inside</p>"
text_plugin_two = add_plugin(page_one_ph_two, u"TextPlugin", u"en", body=pre_nesting_body)
text_plugin_two = self.reload(text_plugin_two)
# prepare nesting plugin
page_one_ph_two = self.reload(page_one_ph_two)
text_plugin_two = self.reload(text_plugin_two)
link_plugin = add_plugin(page_one_ph_two, u"LinkPlugin", u"en", target=text_plugin_two)
link_plugin.name = u"django-cms Link"
link_plugin.external_link = u"https://www.django-cms.org"
link_plugin.parent = text_plugin_two
link_plugin.save()
link_plugin = self.reload(link_plugin)
text_plugin_two = self.reload(text_plugin_two)
in_txt = """<cms-plugin id="%s" title="Link" alt="Link"></cms-plugin>"""
nesting_body = "%s<p>%s</p>" % (text_plugin_two.body, (in_txt % (link_plugin.id)))
# emulate the editor in admin that adds some txt for the nested plugin
text_plugin_two.body = nesting_body
text_plugin_two.save()
text_plugin_two = self.reload(text_plugin_two)
# the link is attached as a child?
self.assertEqual(text_plugin_two.get_children().count(), 1)
post_add_plugin_count = CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count()
self.assertEqual(post_add_plugin_count, 3)
page_one.save()
# get the plugins from the original page
page_one = self.reload(page_one)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
# verify that the plugins got created
org_placeholder_one_plugins = page_one_ph_one.get_plugins()
self.assertEqual(len(org_placeholder_one_plugins), 1)
org_placeholder_two_plugins = page_one_ph_two.get_plugins()
self.assertEqual(len(org_placeholder_two_plugins), 2)
org_placeholder_three_plugins = page_one_ph_three.get_plugins()
self.assertEqual(len(org_placeholder_three_plugins), 0)
self.assertEqual(page_one.placeholders.count(), 3)
placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
self.assertEqual(placeholder_count, 3)
self.assertEqual(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count(), 3)
##
# setup page_copy_target page
##
page_copy_target = create_page("Three Placeholder - page copy target", "col_three.html", "en",
position="last-child", published=True, in_navigation=True)
all_page_count = Page.objects.drafts().count()
pre_copy_placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
self.assertEqual(pre_copy_placeholder_count, 6)
# copy the page
superuser = self.get_superuser()
with self.login_user_context(superuser):
page_two = self.copy_page(page_one, page_copy_target)
# validate the expected pages,placeholders,plugins,pluginbodies
after_copy_page_plugin_count = CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count()
self.assertEqual(after_copy_page_plugin_count, 6)
# check the amount of copied stuff
after_copy_page_count = Page.objects.drafts().count()
after_copy_placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
self.assertGreater(after_copy_page_count, all_page_count, u"no new page after copy")
self.assertGreater(after_copy_page_plugin_count, post_add_plugin_count, u"plugin count is not grown")
self.assertGreater(after_copy_placeholder_count, pre_copy_placeholder_count,
u"placeholder count is not grown")
self.assertEqual(after_copy_page_count, 3, u"no new page after copy")
# original placeholder
page_one = self.reload(page_one)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
            # check if there are multiple pages assigned to these placeholders
found_page = page_one_ph_one.page if page_one_ph_one else None
self.assertEqual(found_page, page_one)
found_page = page_one_ph_two.page if page_one_ph_two else None
self.assertEqual(found_page, page_one)
found_page = page_one_ph_three.page if page_one_ph_three else None
self.assertEqual(found_page, page_one)
page_two = self.reload(page_two)
page_two_ph_one = page_two.placeholders.get(slot=u"col_sidebar")
page_two_ph_two = page_two.placeholders.get(slot=u"col_left")
page_two_ph_three = page_two.placeholders.get(slot=u"col_right")
            # check if there are multiple pages assigned to these placeholders
found_page = page_two_ph_one.page if page_two_ph_one else None
self.assertEqual(found_page, page_two)
found_page = page_two_ph_two.page if page_two_ph_two else None
self.assertEqual(found_page, page_two)
found_page = page_two_ph_three.page if page_two_ph_three else None
self.assertEqual(found_page, page_two)
# check the stored placeholders org vs copy
msg = 'placeholder ids copy:%s org:%s copied page %s are identical - tree broken' % (
page_two_ph_one.pk, page_one_ph_one.pk, page_two.pk)
self.assertNotEqual(page_two_ph_one.pk, page_one_ph_one.pk, msg)
msg = 'placeholder ids copy:%s org:%s copied page %s are identical - tree broken' % (
page_two_ph_two.pk, page_one_ph_two.pk, page_two.pk)
self.assertNotEqual(page_two_ph_two.pk, page_one_ph_two.pk, msg)
msg = 'placeholder ids copy:%s org:%s copied page %s are identical - tree broken' % (
page_two_ph_three.pk, page_one_ph_three.pk, page_two.pk)
self.assertNotEqual(page_two_ph_three.pk, page_one_ph_three.pk, msg)
# get the plugins from the original page
org_placeholder_one_plugins = page_one_ph_one.get_plugins()
self.assertEqual(len(org_placeholder_one_plugins), 1)
org_placeholder_two_plugins = page_one_ph_two.get_plugins()
self.assertEqual(len(org_placeholder_two_plugins), 2)
org_placeholder_three_plugins = page_one_ph_three.get_plugins()
self.assertEqual(len(org_placeholder_three_plugins), 0)
# get the plugins from the copied page
copied_placeholder_one_plugins = page_two_ph_one.get_plugins()
self.assertEqual(len(copied_placeholder_one_plugins), 1)
copied_placeholder_two_plugins = page_two_ph_two.get_plugins()
self.assertEqual(len(copied_placeholder_two_plugins), 2)
copied_placeholder_three_plugins = page_two_ph_three.get_plugins()
self.assertEqual(len(copied_placeholder_three_plugins), 0)
# verify the plugins got copied
# placeholder 1
count_plugins_copied = len(copied_placeholder_one_plugins)
count_plugins_org = len(org_placeholder_one_plugins)
msg = u"plugin count %s %s for placeholder one not equal" % (count_plugins_copied, count_plugins_org)
self.assertEqual(count_plugins_copied, count_plugins_org, msg)
# placeholder 2
count_plugins_copied = len(copied_placeholder_two_plugins)
count_plugins_org = len(org_placeholder_two_plugins)
msg = u"plugin count %s %s for placeholder two not equal" % (count_plugins_copied, count_plugins_org)
self.assertEqual(count_plugins_copied, count_plugins_org, msg)
# placeholder 3
count_plugins_copied = len(copied_placeholder_three_plugins)
count_plugins_org = len(org_placeholder_three_plugins)
msg = u"plugin count %s %s for placeholder three not equal" % (count_plugins_copied, count_plugins_org)
self.assertEqual(count_plugins_copied, count_plugins_org, msg)
# verify the body of the text plugin with the nested link plugin
# compare org to copied
org_nested_text_plugin = None
# iterate to find the actual text plugin that has the nested link attached;
# the plugin inheritance mechanism works through get_plugin_instance(),
# which returns an (instance, plugin_class) tuple
for x in org_placeholder_two_plugins:
if x.plugin_type == u"TextPlugin":
instance = x.get_plugin_instance()[0]
if instance.body.startswith(pre_nesting_body):
org_nested_text_plugin = instance
break
copied_nested_text_plugin = None
for x in copied_placeholder_two_plugins:
if x.plugin_type == u"TextPlugin":
instance = x.get_plugin_instance()[0]
if instance.body.startswith(pre_nesting_body):
copied_nested_text_plugin = instance
break
msg = u"orginal nested text plugin not found"
self.assertNotEquals(org_nested_text_plugin, None, msg=msg)
msg = u"copied nested text plugin not found"
self.assertNotEquals(copied_nested_text_plugin, None, msg=msg)
# get the child ids of the text plugin with a nested link
# to check that the body of the text is generated correctly
org_link_child_plugin = org_nested_text_plugin.get_children()[0]
copied_link_child_plugin = copied_nested_text_plugin.get_children()[0]
# validate the text plugin body texts
msg = u"org plugin and copied plugin are the same"
self.assertNotEqual(org_link_child_plugin.id, copied_link_child_plugin.id, msg)
needle = u"%s"
msg = u"child plugin id differs to parent in body"
# linked child is in body
self.assertTrue(org_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) != -1, msg)
msg = u"copy: child plugin id differs to parent in body"
self.assertTrue(copied_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) != -1, msg)
# and no cross-references between original and copied plugin ids
msg = u"copied child plugin id unexpectedly found in original body"
self.assertTrue(org_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) == -1, msg)
msg = u"copy: child link plugin id differs to parent body"
self.assertTrue(copied_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) == -1, msg)
# now reverse lookup the placeholders from the plugins
org_placeholder = org_link_child_plugin.placeholder
copied_placeholder = copied_link_child_plugin.placeholder
msg = u"placeholder of the orginal plugin and copied plugin are the same"
ok = ((org_placeholder.id != copied_placeholder.id))
self.assertTrue(ok, msg)
def test_copy_page_nested_plugin_moved_parent_plugin(self):
"""
Test to verify that page copy with a nested plugin works
when a plugin with a child has been moved to another placeholder.
page one - 3 placeholders
col_sidebar:
1 text plugin
col_left: 1 text plugin with nested link plugin
col_right: no plugin
page two (copy target)
step 2: move the col_left text plugin to col_right
col_sidebar:
1 text plugin
col_left: no plugin
col_right: 1 text plugin with nested link plugin
verify the copied page structure
"""
with self.settings(CMS_PERMISSION=False):
# setup page 1
page_one = create_page(u"Three Placeholder", u"col_three.html", u"en",
position=u"last-child", published=True, in_navigation=True)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
page_one.placeholders.get(slot=u"col_right")
# add the text plugin to placeholder one
text_plugin_en = add_plugin(page_one_ph_one, u"TextPlugin", u"en", body=u"Hello World")
self.assertEqual(text_plugin_en.id, CMSPlugin.objects.all()[0].id)
self.assertEqual(text_plugin_en.get_children().count(), 0)
pre_add_plugin_count = CMSPlugin.objects.count()
self.assertEqual(pre_add_plugin_count, 1)
# add a plugin to placeholder two
pre_nesting_body = u"<p>the nested text plugin with a link inside</p>"
text_plugin_two = add_plugin(page_one_ph_two, u"TextPlugin", u"en", body=pre_nesting_body)
text_plugin_two = self.reload(text_plugin_two)
# prepare nesting plugin
page_one_ph_two = self.reload(page_one_ph_two)
text_plugin_two = self.reload(text_plugin_two)
link_plugin = add_plugin(page_one_ph_two, u"LinkPlugin", u"en", target=text_plugin_two)
link_plugin.name = u"django-cms Link"
link_plugin.external_link = u"https://www.django-cms.org"
link_plugin.parent = text_plugin_two
link_plugin.save()
# reload after every save
link_plugin = self.reload(link_plugin)
text_plugin_two = self.reload(text_plugin_two)
in_txt = u"""<cms-plugin id="%s" title="Link" alt="Link"></cms-plugin>"""
nesting_body = "%s<p>%s</p>" % (text_plugin_two.body, (in_txt % (link_plugin.id)))
# emulate the editor in admin that adds some txt for the nested plugin
text_plugin_two.body = nesting_body
text_plugin_two.save()
text_plugin_two = self.reload(text_plugin_two)
# verify the link is attached as a child
self.assertEqual(text_plugin_two.get_children().count(), 1)
post_add_plugin_count = CMSPlugin.objects.count()
self.assertEqual(post_add_plugin_count, 3)
page_one.save()
# get the plugins from the original page
page_one = self.reload(page_one)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
# verify the plugins got created
org_placeholder_one_plugins = page_one_ph_one.get_plugins()
self.assertEqual(len(org_placeholder_one_plugins), 1)
org_placeholder_two_plugins = page_one_ph_two.get_plugins()
self.assertEqual(len(org_placeholder_two_plugins), 2)
org_placeholder_three_plugins = page_one_ph_three.get_plugins()
self.assertEqual(len(org_placeholder_three_plugins), 0)
self.assertEqual(page_one.placeholders.count(), 3)
placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
self.assertEqual(placeholder_count, 3)
self.assertEqual(CMSPlugin.objects.count(), 3)
# setup page_copy_target
page_copy_target = create_page("Three Placeholder - page copy target", "col_three.html", "en",
position="last-child", published=True, in_navigation=True)
all_page_count = Page.objects.drafts().count()
pre_copy_placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
self.assertEqual(pre_copy_placeholder_count, 6)
superuser = self.get_superuser()
with self.login_user_context(superuser):
# now move the parent text plugin to another placeholder
post_data = {
'placeholder_id': page_one_ph_three.id,
'plugin_id': text_plugin_two.id,
'plugin_language': 'en',
'plugin_parent': '',
}
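# plugin_parent is left empty so the plugin is moved as a root-level plugin
# of the target placeholder; its child is expected to move along with it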
edit_url = self.get_move_plugin_uri(text_plugin_two)
response = self.client.post(edit_url, post_data)
self.assertEqual(response.status_code, 200)
# check if the plugin got moved
page_one = self.reload(page_one)
self.reload(text_plugin_two)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
org_placeholder_one_plugins = page_one_ph_one.get_plugins()
self.assertEqual(len(org_placeholder_one_plugins), 1)
org_placeholder_two_plugins = page_one_ph_two.get_plugins()
# the plugin and its child were moved away from this placeholder
self.assertEqual(len(org_placeholder_two_plugins), 0)
org_placeholder_three_plugins = page_one_ph_three.get_plugins()
self.assertEqual(len(org_placeholder_three_plugins), 2)
# copy the page
page_two = self.copy_page(page_one, page_copy_target)
# validate the expected pages, placeholders, plugins and plugin bodies
after_copy_page_plugin_count = CMSPlugin.objects.count()
self.assertEqual(after_copy_page_plugin_count, 6)
after_copy_page_count = Page.objects.drafts().count()
after_copy_placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
self.assertGreater(after_copy_page_count, all_page_count, u"no new page after copy")
self.assertGreater(after_copy_page_plugin_count, post_add_plugin_count, u"plugin count did not grow")
self.assertGreater(after_copy_placeholder_count, pre_copy_placeholder_count,
u"placeholder count did not grow")
self.assertEqual(after_copy_page_count, 3, u"no new page after copy")
# validate the structure
# original placeholder
page_one = self.reload(page_one)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
# check if there are multiple pages assigned to these placeholders
found_page = page_one_ph_one.page if page_one_ph_one else None
self.assertEqual(found_page, page_one)
found_page = page_one_ph_two.page if page_one_ph_two else None
self.assertEqual(found_page, page_one)
found_page = page_one_ph_three.page if page_one_ph_three else None
self.assertEqual(found_page, page_one)
page_two = self.reload(page_two)
page_two_ph_one = page_two.placeholders.get(slot=u"col_sidebar")
page_two_ph_two = page_two.placeholders.get(slot=u"col_left")
page_two_ph_three = page_two.placeholders.get(slot=u"col_right")
# check if there are multiple pages assigned to these placeholders
found_page = page_two_ph_one.page if page_two_ph_one else None
self.assertEqual(found_page, page_two)
found_page = page_two_ph_two.page if page_two_ph_two else None
self.assertEqual(found_page, page_two)
found_page = page_two_ph_three.page if page_two_ph_three else None
self.assertEqual(found_page, page_two)
# check the stored placeholders org vs copy
msg = u'placeholder ids copy:%s org:%s copied page %s are identical - tree broken' % (
page_two_ph_one.pk, page_one_ph_one.pk, page_two.pk)
self.assertNotEqual(page_two_ph_one.pk, page_one_ph_one.pk, msg)
msg = u'placeholder ids copy:%s org:%s copied page %s are identical - tree broken' % (
page_two_ph_two.pk, page_one_ph_two.pk, page_two.pk)
self.assertNotEqual(page_two_ph_two.pk, page_one_ph_two.pk, msg)
msg = u'placeholder ids copy:%s org:%s copied page %s are identical - tree broken' % (
page_two_ph_three.pk, page_one_ph_three.pk, page_two.pk)
self.assertNotEqual(page_two_ph_three.pk, page_one_ph_three.pk, msg)
# get the plugins from the original page
org_placeholder_one_plugins = page_one_ph_one.get_plugins()
self.assertEqual(len(org_placeholder_one_plugins), 1)
org_placeholder_two_plugins = page_one_ph_two.get_plugins()
self.assertEqual(len(org_placeholder_two_plugins), 0)
org_placeholder_three_plugins = page_one_ph_three.get_plugins()
self.assertEqual(len(org_placeholder_three_plugins), 2)
# get the plugins from the copied page
copied_placeholder_one_plugins = page_two_ph_one.get_plugins()
self.assertEqual(len(copied_placeholder_one_plugins), 1)
copied_placeholder_two_plugins = page_two_ph_two.get_plugins()
self.assertEqual(len(copied_placeholder_two_plugins), 0)
copied_placeholder_three_plugins = page_two_ph_three.get_plugins()
self.assertEqual(len(copied_placeholder_three_plugins), 2)
# verify the plugins got copied
# placeholder 1
count_plugins_copied = len(copied_placeholder_one_plugins)
count_plugins_org = len(org_placeholder_one_plugins)
msg = u"plugin count %s %s for placeholder one not equal" % (count_plugins_copied, count_plugins_org)
self.assertEqual(count_plugins_copied, count_plugins_org, msg)
# placeholder 2
count_plugins_copied = len(copied_placeholder_two_plugins)
count_plugins_org = len(org_placeholder_two_plugins)
msg = u"plugin count %s %s for placeholder two not equal" % (count_plugins_copied, count_plugins_org)
self.assertEqual(count_plugins_copied, count_plugins_org, msg)
# placeholder 3
count_plugins_copied = len(copied_placeholder_three_plugins)
count_plugins_org = len(org_placeholder_three_plugins)
msg = u"plugin count %s %s for placeholder three not equal" % (count_plugins_copied, count_plugins_org)
self.assertEqual(count_plugins_copied, count_plugins_org, msg)
# verify the body of the text plugin with the nested link plugin
# compare org to copied
org_nested_text_plugin = None
# iterate to find the actual text plugin that has the nested link attached;
# the plugin inheritance mechanism works through get_plugin_instance(),
# which returns an (instance, plugin_class) tuple
for x in org_placeholder_three_plugins:
if x.plugin_type == u"TextPlugin":
instance = x.get_plugin_instance()[0]
if instance.body.startswith(pre_nesting_body):
org_nested_text_plugin = instance
break
copied_nested_text_plugin = None
for x in copied_placeholder_three_plugins:
if x.plugin_type == u"TextPlugin":
instance = x.get_plugin_instance()[0]
if instance.body.startswith(pre_nesting_body):
copied_nested_text_plugin = instance
break
msg = u"orginal nested text plugin not found"
self.assertNotEquals(org_nested_text_plugin, None, msg=msg)
msg = u"copied nested text plugin not found"
self.assertNotEquals(copied_nested_text_plugin, None, msg=msg)
# get the child ids of the text plugin with a nested link
# to check if the body of the text is generated correctly
org_link_child_plugin = org_nested_text_plugin.get_children()[0]
copied_link_child_plugin = copied_nested_text_plugin.get_children()[0]
# validate the text plugin body texts
msg = u"org plugin and copied plugin are the same"
self.assertNotEqual(org_link_child_plugin.id, copied_link_child_plugin.id, msg)
needle = u"%s"
msg = u"child plugin id differs to parent in body"
# linked child is in body
self.assertTrue(org_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) != -1, msg)
msg = u"copy: child plugin id differs to parent in body plugin_obj_id"
self.assertTrue(copied_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) != -1, msg)
# and no cross-references between original and copied plugin ids
msg = u"copied child plugin id unexpectedly found in original body"
self.assertTrue(org_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) == -1, msg)
msg = u"copy: child link plugin id differs to parent body"
self.assertTrue(copied_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) == -1, msg)
# now reverse lookup the placeholders from the plugins
org_placeholder = org_link_child_plugin.placeholder
copied_placeholder = copied_link_child_plugin.placeholder
msg = u"placeholder of the orginal plugin and copied plugin are the same"
self.assertNotEqual(org_placeholder.id, copied_placeholder.id, msg)
def test_add_child_plugin(self):
page_one = create_page(u"Three Placeholder", u"col_three.html", u"en",
position=u"last-child", published=True, in_navigation=True)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
# add the text plugin to placeholder one
text_plugin_en = add_plugin(page_one_ph_one, u"TextPlugin", u"en", body=u"Hello World")
superuser = self.get_superuser()
with self.login_user_context(superuser):
post_data = {
'name': 'test',
'external_link': 'http://www.example.org/'
}
add_url = self.get_add_plugin_uri(page_one_ph_one, 'LinkPlugin', parent=text_plugin_en)
response = self.client.post(add_url, post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'admin/cms/page/plugin/confirm_form.html'
)
link_plugin = CMSPlugin.objects.get(parent_id=text_plugin_en.pk)
self.assertEqual(link_plugin.parent_id, text_plugin_en.pk)
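# materialized tree path: '0001' (first root plugin) + '0001' (its first child)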
self.assertEqual(link_plugin.path, '00010001')
|
[
"cms.models.pluginmodel.CMSPlugin.objects.all",
"cms.models.placeholdermodel.Placeholder",
"cms.models.pluginmodel.CMSPlugin.objects.update",
"djangocms_text_ckeditor.models.Text.objects.get",
"cms.models.Page.objects.drafts",
"cms.models.placeholdermodel.Placeholder.objects.filter",
"cms.api.create_page",
"cms.models.placeholdermodel.Placeholder.objects.create",
"cms.models.pluginmodel.CMSPlugin.objects.get",
"cms.api.add_plugin",
"cms.utils.plugins.reorder_plugins",
"cms.models.pluginmodel.CMSPlugin.fix_tree",
"cms.models.pluginmodel.CMSPlugin.objects.filter",
"cms.models.pluginmodel.CMSPlugin.objects.count"
] |
[((750, 863), 'cms.models.pluginmodel.CMSPlugin.objects.filter', 'CMSPlugin.objects.filter', ([], {'parent_id': 'parent_id', 'language': 'plugin.language', 'placeholder_id': 'plugin.placeholder_id'}), '(parent_id=parent_id, language=plugin.language,\n placeholder_id=plugin.placeholder_id)\n', (774, 863), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((5833, 5863), 'cms.models.placeholdermodel.Placeholder', 'Placeholder', ([], {'slot': 'u"""some_slot"""'}), "(slot=u'some_slot')\n", (5844, 5863), False, 'from cms.models.placeholdermodel import Placeholder\n'), ((5985, 6042), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""01"""'}), "(placeholder, u'TextPlugin', u'en', body=u'01')\n", (5995, 6042), False, 'from cms.api import create_page, add_plugin\n'), ((6305, 6379), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""02"""', 'target': 'plugin_1'}), "(placeholder, u'TextPlugin', u'en', body=u'02', target=plugin_1)\n", (6315, 6379), False, 'from cms.api import create_page, add_plugin\n'), ((6539, 6613), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""03"""', 'target': 'plugin_1'}), "(placeholder, u'TextPlugin', u'en', body=u'03', target=plugin_1)\n", (6549, 6613), False, 'from cms.api import create_page, add_plugin\n'), ((6756, 6830), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""04"""', 'target': 'plugin_2'}), "(placeholder, u'TextPlugin', u'en', body=u'04', target=plugin_2)\n", (6766, 6830), False, 'from cms.api import create_page, add_plugin\n'), ((6998, 7055), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""05"""'}), "(placeholder, u'TextPlugin', u'en', body=u'05')\n", (7008, 7055), False, 'from cms.api import create_page, add_plugin\n'), ((7401, 7475), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""06"""', 'target': 'plugin_5'}), "(placeholder, u'TextPlugin', u'en', body=u'06', target=plugin_5)\n", (7411, 7475), False, 'from cms.api import create_page, add_plugin\n'), ((7618, 7692), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""07"""', 'target': 'plugin_5'}), "(placeholder, u'TextPlugin', u'en', body=u'07', target=plugin_5)\n", (7628, 7692), False, 'from cms.api import create_page, add_plugin\n'), ((7843, 7917), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""08"""', 'target': 'plugin_2'}), "(placeholder, u'TextPlugin', u'en', body=u'08', target=plugin_2)\n", (7853, 7917), False, 'from cms.api import create_page, add_plugin\n'), ((8060, 8134), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""09"""', 'target': 'plugin_3'}), "(placeholder, u'TextPlugin', u'en', body=u'09', target=plugin_3)\n", (8070, 8134), False, 'from cms.api import create_page, add_plugin\n'), ((8278, 8352), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""10"""', 'target': 'plugin_4'}), "(placeholder, u'TextPlugin', u'en', body=u'10', target=plugin_4)\n", (8288, 8352), False, 'from cms.api import create_page, add_plugin\n'), ((9203, 9252), 'cms.models.pluginmodel.CMSPlugin.objects.filter', 'CMSPlugin.objects.filter', ([], {'placeholder': 'placeholder'}), 
'(placeholder=placeholder)\n', (9227, 9252), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((9862, 9898), 'cms.models.pluginmodel.CMSPlugin.objects.update', 'CMSPlugin.objects.update', ([], {'position': '(1)'}), '(position=1)\n', (9886, 9898), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((9907, 9927), 'cms.models.pluginmodel.CMSPlugin.fix_tree', 'CMSPlugin.fix_tree', ([], {}), '()\n', (9925, 9927), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((10642, 10711), 'cms.utils.plugins.reorder_plugins', 'reorder_plugins', (['placeholder', 'None', 'u"""en"""', '[plugin_5.pk, plugin_1.pk]'], {}), "(placeholder, None, u'en', [plugin_5.pk, plugin_1.pk])\n", (10657, 10711), False, 'from cms.utils.plugins import reorder_plugins\n'), ((10809, 10829), 'cms.models.pluginmodel.CMSPlugin.fix_tree', 'CMSPlugin.fix_tree', ([], {}), '()\n', (10827, 10829), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((12244, 12274), 'cms.models.placeholdermodel.Placeholder', 'Placeholder', ([], {'slot': 'u"""some_slot"""'}), "(slot=u'some_slot')\n", (12255, 12274), False, 'from cms.models.placeholdermodel import Placeholder\n'), ((12396, 12453), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""01"""'}), "(placeholder, u'TextPlugin', u'en', body=u'01')\n", (12406, 12453), False, 'from cms.api import create_page, add_plugin\n'), ((12746, 12820), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""02"""', 'target': 'plugin_1'}), "(placeholder, u'TextPlugin', u'en', body=u'02', target=plugin_1)\n", (12756, 12820), False, 'from cms.api import create_page, add_plugin\n'), ((13225, 13299), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""03"""', 'target': 'plugin_1'}), "(placeholder, u'TextPlugin', u'en', body=u'03', target=plugin_1)\n", (13235, 13299), False, 'from cms.api import create_page, add_plugin\n'), ((13860, 13934), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""04"""', 'target': 'plugin_2'}), "(placeholder, u'TextPlugin', u'en', body=u'04', target=plugin_2)\n", (13870, 13934), False, 'from cms.api import create_page, add_plugin\n'), ((14684, 14741), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""05"""'}), "(placeholder, u'TextPlugin', u'en', body=u'05')\n", (14694, 14741), False, 'from cms.api import create_page, add_plugin\n'), ((15087, 15161), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""06"""', 'target': 'plugin_5'}), "(placeholder, u'TextPlugin', u'en', body=u'06', target=plugin_5)\n", (15097, 15161), False, 'from cms.api import create_page, add_plugin\n'), ((15491, 15565), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""07"""', 'target': 'plugin_5'}), "(placeholder, u'TextPlugin', u'en', body=u'07', target=plugin_5)\n", (15501, 15565), False, 'from cms.api import create_page, add_plugin\n'), ((16273, 16347), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""08"""', 'target': 'plugin_2'}), "(placeholder, u'TextPlugin', u'en', body=u'08', target=plugin_2)\n", (16283, 16347), False, 'from cms.api import create_page, add_plugin\n'), ((16762, 16836), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 
'u"""09"""', 'target': 'plugin_3'}), "(placeholder, u'TextPlugin', u'en', body=u'09', target=plugin_3)\n", (16772, 16836), False, 'from cms.api import create_page, add_plugin\n'), ((17166, 17240), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""10"""', 'target': 'plugin_4'}), "(placeholder, u'TextPlugin', u'en', body=u'10', target=plugin_4)\n", (17176, 17240), False, 'from cms.api import create_page, add_plugin\n'), ((17690, 17785), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""11"""', 'target': 'plugin_1', 'position': '"""left"""'}), "(placeholder, u'TextPlugin', u'en', body=u'11', target=plugin_1,\n position='left')\n", (17700, 17785), False, 'from cms.api import create_page, add_plugin\n'), ((18221, 18316), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""12"""', 'target': 'plugin_4', 'position': '"""left"""'}), "(placeholder, u'TextPlugin', u'en', body=u'12', target=plugin_4,\n position='left')\n", (18231, 18316), False, 'from cms.api import create_page, add_plugin\n'), ((18809, 18905), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""13"""', 'target': 'plugin_7', 'position': '"""right"""'}), "(placeholder, u'TextPlugin', u'en', body=u'13', target=plugin_7,\n position='right')\n", (18819, 18905), False, 'from cms.api import create_page, add_plugin\n'), ((19395, 19452), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""14"""'}), "(placeholder, u'TextPlugin', u'en', body=u'14')\n", (19405, 19452), False, 'from cms.api import create_page, add_plugin\n'), ((52927, 53051), 'cms.api.create_page', 'create_page', (['u"""Three Placeholder"""', 'u"""col_three.html"""', 'u"""en"""'], {'position': 'u"""last-child"""', 'published': '(True)', 'in_navigation': '(True)'}), "(u'Three Placeholder', u'col_three.html', u'en', position=\n u'last-child', published=True, in_navigation=True)\n", (52938, 53051), False, 'from cms.api import create_page, add_plugin\n'), ((53229, 53299), 'cms.api.add_plugin', 'add_plugin', (['page_one_ph_one', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""Hello World"""'}), "(page_one_ph_one, u'TextPlugin', u'en', body=u'Hello World')\n", (53239, 53299), False, 'from cms.api import create_page, add_plugin\n'), ((53894, 53944), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'parent_id': 'text_plugin_en.pk'}), '(parent_id=text_plugin_en.pk)\n', (53915, 53944), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((1547, 1605), 'cms.models.placeholdermodel.Placeholder.objects.create', 'Placeholder.objects.create', ([], {'slot': 'original_placeholder.slot'}), '(slot=original_placeholder.slot)\n', (1573, 1605), False, 'from cms.models.placeholdermodel import Placeholder\n'), ((22630, 22754), 'cms.api.create_page', 'create_page', (['u"""Three Placeholder"""', 'u"""col_three.html"""', 'u"""en"""'], {'position': 'u"""last-child"""', 'published': '(True)', 'in_navigation': '(True)'}), "(u'Three Placeholder', u'col_three.html', u'en', position=\n u'last-child', published=True, in_navigation=True)\n", (22641, 22754), False, 'from cms.api import create_page, add_plugin\n'), ((22996, 23068), 'cms.api.add_plugin', 'add_plugin', (['page_one_ph_two', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'pre_nesting_body'}), "(page_one_ph_two, u'TextPlugin', u'en', body=pre_nesting_body)\n", (23006, 
23068), False, 'from cms.api import create_page, add_plugin\n'), ((23241, 23310), 'cms.api.add_plugin', 'add_plugin', (['page_one_ph_two', 'u"""LinkPlugin"""', 'u"""en"""'], {'target': 'text_plugin'}), "(page_one_ph_two, u'LinkPlugin', u'en', target=text_plugin)\n", (23251, 23310), False, 'from cms.api import create_page, add_plugin\n'), ((25215, 25240), 'cms.models.pluginmodel.CMSPlugin.objects.count', 'CMSPlugin.objects.count', ([], {}), '()\n', (25238, 25240), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((25814, 25938), 'cms.api.create_page', 'create_page', (['u"""Three Placeholder"""', 'u"""col_three.html"""', 'u"""en"""'], {'position': 'u"""last-child"""', 'published': '(True)', 'in_navigation': '(True)'}), "(u'Three Placeholder', u'col_three.html', u'en', position=\n u'last-child', published=True, in_navigation=True)\n", (25825, 25938), False, 'from cms.api import create_page, add_plugin\n'), ((26259, 26328), 'cms.api.add_plugin', 'add_plugin', (['page_one_ph_one', 'u"""TextPlugin"""', 'u"""en"""'], {'body': '"""Hello World"""'}), "(page_one_ph_one, u'TextPlugin', u'en', body='Hello World')\n", (26269, 26328), False, 'from cms.api import create_page, add_plugin\n'), ((26514, 26539), 'cms.models.pluginmodel.CMSPlugin.objects.count', 'CMSPlugin.objects.count', ([], {}), '()\n', (26537, 26539), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((26785, 26857), 'cms.api.add_plugin', 'add_plugin', (['page_one_ph_two', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'pre_nesting_body'}), "(page_one_ph_two, u'TextPlugin', u'en', body=pre_nesting_body)\n", (26795, 26857), False, 'from cms.api import create_page, add_plugin\n'), ((27098, 27171), 'cms.api.add_plugin', 'add_plugin', (['page_one_ph_two', 'u"""LinkPlugin"""', 'u"""en"""'], {'target': 'text_plugin_two'}), "(page_one_ph_two, u'LinkPlugin', u'en', target=text_plugin_two)\n", (27108, 27171), False, 'from cms.api import create_page, add_plugin\n'), ((29424, 29562), 'cms.api.create_page', 'create_page', (['"""Three Placeholder - page copy target"""', '"""col_three.html"""', '"""en"""'], {'position': '"""last-child"""', 'published': '(True)', 'in_navigation': '(True)'}), "('Three Placeholder - page copy target', 'col_three.html', 'en',\n position='last-child', published=True, in_navigation=True)\n", (29435, 29562), False, 'from cms.api import create_page, add_plugin\n'), ((39158, 39282), 'cms.api.create_page', 'create_page', (['u"""Three Placeholder"""', 'u"""col_three.html"""', 'u"""en"""'], {'position': 'u"""last-child"""', 'published': '(True)', 'in_navigation': '(True)'}), "(u'Three Placeholder', u'col_three.html', u'en', position=\n u'last-child', published=True, in_navigation=True)\n", (39169, 39282), False, 'from cms.api import create_page, add_plugin\n'), ((39603, 39673), 'cms.api.add_plugin', 'add_plugin', (['page_one_ph_one', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'u"""Hello World"""'}), "(page_one_ph_one, u'TextPlugin', u'en', body=u'Hello World')\n", (39613, 39673), False, 'from cms.api import create_page, add_plugin\n'), ((39859, 39884), 'cms.models.pluginmodel.CMSPlugin.objects.count', 'CMSPlugin.objects.count', ([], {}), '()\n', (39882, 39884), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((40099, 40171), 'cms.api.add_plugin', 'add_plugin', (['page_one_ph_two', 'u"""TextPlugin"""', 'u"""en"""'], {'body': 'pre_nesting_body'}), "(page_one_ph_two, u'TextPlugin', u'en', body=pre_nesting_body)\n", (40109, 40171), False, 'from cms.api import create_page, add_plugin\n'), ((40411, 40484), 
'cms.api.add_plugin', 'add_plugin', (['page_one_ph_two', 'u"""LinkPlugin"""', 'u"""en"""'], {'target': 'text_plugin_two'}), "(page_one_ph_two, u'LinkPlugin', u'en', target=text_plugin_two)\n", (40421, 40484), False, 'from cms.api import create_page, add_plugin\n'), ((41394, 41419), 'cms.models.pluginmodel.CMSPlugin.objects.count', 'CMSPlugin.objects.count', ([], {}), '()\n', (41417, 41419), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((42634, 42772), 'cms.api.create_page', 'create_page', (['"""Three Placeholder - page copy target"""', '"""col_three.html"""', '"""en"""'], {'position': '"""last-child"""', 'published': '(True)', 'in_navigation': '(True)'}), "('Three Placeholder - page copy target', 'col_three.html', 'en',\n position='last-child', published=True, in_navigation=True)\n", (42645, 42772), False, 'from cms.api import create_page, add_plugin\n'), ((44763, 44788), 'cms.models.pluginmodel.CMSPlugin.objects.count', 'CMSPlugin.objects.count', ([], {}), '()\n', (44786, 44788), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((3789, 3821), 'djangocms_text_ckeditor.models.Text.objects.get', 'Text.objects.get', ([], {'id': 'original.id'}), '(id=original.id)\n', (3805, 3821), False, 'from djangocms_text_ckeditor.models import Text\n'), ((3859, 3887), 'djangocms_text_ckeditor.models.Text.objects.get', 'Text.objects.get', ([], {'id': 'copy.id'}), '(id=copy.id)\n', (3875, 3887), False, 'from djangocms_text_ckeditor.models import Text\n'), ((13080, 13117), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_2.pk'}), '(id=plugin_2.pk)\n', (13101, 13117), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((13662, 13699), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_2.pk'}), '(id=plugin_2.pk)\n', (13683, 13699), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((13717, 13754), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_3.pk'}), '(id=plugin_3.pk)\n', (13738, 13754), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((14134, 14171), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_4.pk'}), '(id=plugin_4.pk)\n', (14155, 14171), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((14412, 14449), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_2.pk'}), '(id=plugin_2.pk)\n', (14433, 14449), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((14467, 14504), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_4.pk'}), '(id=plugin_4.pk)\n', (14488, 14504), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((14522, 14559), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_3.pk'}), '(id=plugin_3.pk)\n', (14543, 14559), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((15362, 15399), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_6.pk'}), '(id=plugin_6.pk)\n', (15383, 15399), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((15782, 15819), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_6.pk'}), '(id=plugin_6.pk)\n', (15803, 15819), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((15837, 15874), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_7.pk'}), 
'(id=plugin_7.pk)\n', (15858, 15874), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((16067, 16104), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_6.pk'}), '(id=plugin_6.pk)\n', (16088, 16104), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((16122, 16159), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_7.pk'}), '(id=plugin_7.pk)\n', (16143, 16159), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((16564, 16601), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_4.pk'}), '(id=plugin_4.pk)\n', (16585, 16601), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((16619, 16656), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_8.pk'}), '(id=plugin_8.pk)\n', (16640, 16656), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((17036, 17073), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_9.pk'}), '(id=plugin_9.pk)\n', (17057, 17073), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((17442, 17480), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_10.pk'}), '(id=plugin_10.pk)\n', (17463, 17480), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((18015, 18052), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_2.pk'}), '(id=plugin_2.pk)\n', (18036, 18052), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((18070, 18107), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_3.pk'}), '(id=plugin_3.pk)\n', (18091, 18107), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((18545, 18583), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_12.pk'}), '(id=plugin_12.pk)\n', (18566, 18583), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((18601, 18638), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_4.pk'}), '(id=plugin_4.pk)\n', (18622, 18638), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((18656, 18693), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_8.pk'}), '(id=plugin_8.pk)\n', (18677, 18693), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((19135, 19172), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_6.pk'}), '(id=plugin_6.pk)\n', (19156, 19172), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((19190, 19227), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_7.pk'}), '(id=plugin_7.pk)\n', (19211, 19227), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((19245, 19283), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_13.pk'}), '(id=plugin_13.pk)\n', (19266, 19283), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((19622, 19660), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_11.pk'}), '(id=plugin_11.pk)\n', (19643, 19660), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((19678, 19715), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_1.pk'}), '(id=plugin_1.pk)\n', (19699, 19715), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((19733, 
19770), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_5.pk'}), '(id=plugin_5.pk)\n', (19754, 19770), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((19788, 19826), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_14.pk'}), '(id=plugin_14.pk)\n', (19809, 19826), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((21640, 21746), 'cms.models.pluginmodel.CMSPlugin.objects.filter', 'CMSPlugin.objects.filter', ([], {'placeholder_id': 'plugin_3.placeholder_id', 'language': 'plugin_3.language', 'depth': '(1)'}), '(placeholder_id=plugin_3.placeholder_id, language=\n plugin_3.language, depth=1)\n', (21664, 21746), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((42536, 42561), 'cms.models.pluginmodel.CMSPlugin.objects.count', 'CMSPlugin.objects.count', ([], {}), '()\n', (42559, 42561), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((7071, 7116), 'cms.models.pluginmodel.CMSPlugin.objects.filter', 'CMSPlugin.objects.filter', ([], {'parent__isnull': '(True)'}), '(parent__isnull=True)\n', (7095, 7116), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((13013, 13050), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_1.pk'}), '(id=plugin_1.pk)\n', (13034, 13050), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((13578, 13615), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_1.pk'}), '(id=plugin_1.pk)\n', (13599, 13615), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((14067, 14104), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_2.pk'}), '(id=plugin_2.pk)\n', (14088, 14104), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((14266, 14303), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_1.pk'}), '(id=plugin_1.pk)\n', (14287, 14303), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((14757, 14802), 'cms.models.pluginmodel.CMSPlugin.objects.filter', 'CMSPlugin.objects.filter', ([], {'parent__isnull': '(True)'}), '(parent__isnull=True)\n', (14781, 14802), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((15295, 15332), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_5.pk'}), '(id=plugin_5.pk)\n', (15316, 15332), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((15698, 15735), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_5.pk'}), '(id=plugin_5.pk)\n', (15719, 15735), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((15980, 16017), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_5.pk'}), '(id=plugin_5.pk)\n', (16001, 16017), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((16480, 16517), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_2.pk'}), '(id=plugin_2.pk)\n', (16501, 16517), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((16969, 17006), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_3.pk'}), '(id=plugin_3.pk)\n', (16990, 17006), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((17375, 17412), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_4.pk'}), '(id=plugin_4.pk)\n', (17396, 17412), False, 'from 
cms.models.pluginmodel import CMSPlugin\n'), ((17931, 17968), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_1.pk'}), '(id=plugin_1.pk)\n', (17952, 17968), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((18461, 18498), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_2.pk'}), '(id=plugin_2.pk)\n', (18482, 18498), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((19051, 19088), 'cms.models.pluginmodel.CMSPlugin.objects.get', 'CMSPlugin.objects.get', ([], {'id': 'plugin_5.pk'}), '(id=plugin_5.pk)\n', (19072, 19088), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((19540, 19573), 'cms.models.pluginmodel.CMSPlugin.objects.filter', 'CMSPlugin.objects.filter', ([], {'depth': '(1)'}), '(depth=1)\n', (19564, 19573), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((28043, 28111), 'cms.models.pluginmodel.CMSPlugin.objects.filter', 'CMSPlugin.objects.filter', ([], {'placeholder__page__publisher_is_draft': '(True)'}), '(placeholder__page__publisher_is_draft=True)\n', (28067, 28111), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((29094, 29151), 'cms.models.placeholdermodel.Placeholder.objects.filter', 'Placeholder.objects.filter', ([], {'page__publisher_is_draft': '(True)'}), '(page__publisher_is_draft=True)\n', (29120, 29151), False, 'from cms.models.placeholdermodel import Placeholder\n'), ((29631, 29652), 'cms.models.Page.objects.drafts', 'Page.objects.drafts', ([], {}), '()\n', (29650, 29652), False, 'from cms.models import Page\n'), ((29702, 29759), 'cms.models.placeholdermodel.Placeholder.objects.filter', 'Placeholder.objects.filter', ([], {'page__publisher_is_draft': '(True)'}), '(page__publisher_is_draft=True)\n', (29728, 29759), False, 'from cms.models.placeholdermodel import Placeholder\n'), ((30147, 30215), 'cms.models.pluginmodel.CMSPlugin.objects.filter', 'CMSPlugin.objects.filter', ([], {'placeholder__page__publisher_is_draft': '(True)'}), '(placeholder__page__publisher_is_draft=True)\n', (30171, 30215), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((30369, 30390), 'cms.models.Page.objects.drafts', 'Page.objects.drafts', ([], {}), '()\n', (30388, 30390), False, 'from cms.models import Page\n'), ((30442, 30499), 'cms.models.placeholdermodel.Placeholder.objects.filter', 'Placeholder.objects.filter', ([], {'page__publisher_is_draft': '(True)'}), '(page__publisher_is_draft=True)\n', (30468, 30499), False, 'from cms.models.placeholdermodel import Placeholder\n'), ((42390, 42447), 'cms.models.placeholdermodel.Placeholder.objects.filter', 'Placeholder.objects.filter', ([], {'page__publisher_is_draft': '(True)'}), '(page__publisher_is_draft=True)\n', (42416, 42447), False, 'from cms.models.placeholdermodel import Placeholder\n'), ((42841, 42862), 'cms.models.Page.objects.drafts', 'Page.objects.drafts', ([], {}), '()\n', (42860, 42862), False, 'from cms.models import Page\n'), ((42912, 42969), 'cms.models.placeholdermodel.Placeholder.objects.filter', 'Placeholder.objects.filter', ([], {'page__publisher_is_draft': '(True)'}), '(page__publisher_is_draft=True)\n', (42938, 42969), False, 'from cms.models.placeholdermodel import Placeholder\n'), ((44887, 44908), 'cms.models.Page.objects.drafts', 'Page.objects.drafts', ([], {}), '()\n', (44906, 44908), False, 'from cms.models import Page\n'), ((44960, 45017), 'cms.models.placeholdermodel.Placeholder.objects.filter', 'Placeholder.objects.filter', ([], {'page__publisher_is_draft': '(True)'}), 
'(page__publisher_is_draft=True)\n', (44986, 45017), False, 'from cms.models.placeholdermodel import Placeholder\n'), ((2192, 2224), 'djangocms_text_ckeditor.models.Text.objects.get', 'Text.objects.get', ([], {'id': 'original.id'}), '(id=original.id)\n', (2208, 2224), False, 'from djangocms_text_ckeditor.models import Text\n'), ((2251, 2279), 'djangocms_text_ckeditor.models.Text.objects.get', 'Text.objects.get', ([], {'id': 'copy.id'}), '(id=copy.id)\n', (2267, 2279), False, 'from djangocms_text_ckeditor.models import Text\n'), ((26377, 26400), 'cms.models.pluginmodel.CMSPlugin.objects.all', 'CMSPlugin.objects.all', ([], {}), '()\n', (26398, 26400), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((29240, 29308), 'cms.models.pluginmodel.CMSPlugin.objects.filter', 'CMSPlugin.objects.filter', ([], {'placeholder__page__publisher_is_draft': '(True)'}), '(placeholder__page__publisher_is_draft=True)\n', (29264, 29308), False, 'from cms.models.pluginmodel import CMSPlugin\n'), ((39722, 39745), 'cms.models.pluginmodel.CMSPlugin.objects.all', 'CMSPlugin.objects.all', ([], {}), '()\n', (39743, 39745), False, 'from cms.models.pluginmodel import CMSPlugin\n')]
|
#
# Collective Knowledge (individual environment - setup)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
import os
import sys
import json
##############################################################################
def version_cmd(i):
path_with_init_py = i['full_path'] # the full_path that ends with PACKAGE_NAME/__init__.py
path_without_init_py = os.path.dirname( path_with_init_py )
package_name = os.path.basename( path_without_init_py )
site_dir = os.path.dirname( path_without_init_py )
ck = i['ck_kernel']
cus = i['customize']
version_module_name = cus.get('version_module_name', '__init__')
version_variable_name = cus.get('version_variable_name', '__version__')
#sys.path.insert(0, site_dir) # temporarily prepend site_dir to allow the potential recursive imports to work
rx=ck.load_module_from_path({'path':path_without_init_py, 'module_code_name':version_module_name, 'skip_init':'yes'})
#sys.path.pop(0) # retain the original module search path
if rx['return']==0:
loaded_package = rx['code']
version_string_json = getattr(loaded_package, version_variable_name, "0")
if version_string_json == "0":
with open(rx['path']) as version:
file_contents = version.readlines()
version_string = file_contents[3]
else:
version_string_dict = json.loads(version_string_json)
version_string=version_string_dict['version']
else:
ck.out('Failed to import package '+package_name+' : '+rx['error'])
version_string = ''
return {'return':0, 'cmd':'', 'version':version_string}
##############################################################################
def dirs(i):
hosd = i['host_os_dict']
macos = hosd.get('macos','')
#dirs = []
dirs = i.get('dirs', [])
if macos:
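# user-installed Python packages on macOS commonly live under ~/Library/Python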
python_site_packages_dir = os.path.expanduser("~") + "/Library/Python"
if os.path.isdir( python_site_packages_dir ):
dirs.append( python_site_packages_dir )
return {'return':0, 'dirs':dirs}
##############################################################################
def setup(i):
"""
Input: {
cfg - meta of this soft entry
self_cfg - meta of module soft
ck_kernel - import CK kernel module (to reuse functions)
host_os_uoa - host OS UOA
host_os_uid - host OS UID
host_os_dict - host OS meta
target_os_uoa - target OS UOA
target_os_uid - target OS UID
target_os_dict - target OS meta
target_device_id - target device ID (if via ADB)
tags - list of tags used to search this entry
env - updated environment vars from meta
customize - updated customize vars from meta
deps - resolved dependencies for this soft
interactive - if 'yes', can ask questions, otherwise quiet
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
bat - prepared string for bat file
}
"""
# Get variables
ck=i['ck_kernel']
iv=i.get('interactive','')
cus=i.get('customize',{})
hosd=i['host_os_dict']
tosd=i['target_os_dict']
winh=hosd.get('windows_base','')
full_path = cus.get('full_path','')
path_lib = os.path.dirname(full_path)
path_install = os.path.dirname(path_lib)
env = i['env']
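# prepend the install dir to PYTHONPATH, referencing the existing value with
# Windows batch syntax or POSIX shell syntax depending on the host OS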
env['PYTHONPATH'] = path_install + ( ';%PYTHONPATH%' if winh=='yes' else ':${PYTHONPATH}')
return {'return':0, 'bat':''}
|
[
"json.loads",
"os.path.basename",
"os.path.isdir",
"os.path.dirname",
"os.path.expanduser"
] |
[((443, 477), 'os.path.dirname', 'os.path.dirname', (['path_with_init_py'], {}), '(path_with_init_py)\n', (458, 477), False, 'import os\n'), ((510, 548), 'os.path.basename', 'os.path.basename', (['path_without_init_py'], {}), '(path_without_init_py)\n', (526, 548), False, 'import os\n'), ((581, 618), 'os.path.dirname', 'os.path.dirname', (['path_without_init_py'], {}), '(path_without_init_py)\n', (596, 618), False, 'import os\n'), ((3839, 3865), 'os.path.dirname', 'os.path.dirname', (['full_path'], {}), '(full_path)\n', (3854, 3865), False, 'import os\n'), ((3896, 3921), 'os.path.dirname', 'os.path.dirname', (['path_lib'], {}), '(path_lib)\n', (3911, 3921), False, 'import os\n'), ((2131, 2170), 'os.path.isdir', 'os.path.isdir', (['python_site_packages_dir'], {}), '(python_site_packages_dir)\n', (2144, 2170), False, 'import os\n'), ((1551, 1582), 'json.loads', 'json.loads', (['version_string_json'], {}), '(version_string_json)\n', (1561, 1582), False, 'import json\n'), ((2076, 2099), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2094, 2099), False, 'import os\n')]
|
import copy
from .exceptions import UnknownKeyException
class ExtendedPublicBundle:
"""
This class looks exactly the same as the PublicBundle class, but the types of the
fields are a bit different:
The spk field is not a key, but a dictionary containing the key and the id:
spk = {
"key" : key,
"id" : id
}
The otpks field is not an array of keys, but an array of dictionaries containing the
key and the id:
otpks = [
{
"key" : key,
"id" : id
},
{
"key" : key,
"id" : id
},
...
]
"""
def __init__(self, ik, spk, spk_signature, otpks):
self.__ik = ik
self.__spk = copy.deepcopy(spk)
self.__spk_signature = spk_signature
self.__otpks = copy.deepcopy(otpks)
@classmethod
def parse(cls, backend, ik, spk, spk_signature, otpks):
"""
Use this method when creating a bundle from data you retrieved directly from some
PEP node. This method applies an additional decoding step to the public keys in
the bundle. Pass the same structure as the constructor expects.
"""
ik = backend.decodePublicKey(ik)[0]
spk = {
"key" : backend.decodePublicKey(spk["key"])[0],
"id" : spk["id"]
}
otpks = list(map(lambda otpk: {
"key" : backend.decodePublicKey(otpk["key"])[0],
"id" : otpk["id"]
}, otpks))
return cls(ik, spk, spk_signature, otpks)
def serialize(self, backend):
"""
Use this method to prepare the data to be uploaded directly to some PEP node. This
method applies an additional encoding step to the public keys in the bundle. The
result is a dictionary with the keys ik, spk, spk_signature and otpks. The values
are structured the same way as the inputs of the constructor.
"""
return {
"ik": backend.encodePublicKey(self.ik, "25519"),
"spk": {
"id" : self.spk["id"],
"key" : backend.encodePublicKey(self.spk["key"], "25519"),
},
"spk_signature": self.spk_signature,
"otpks": list(map(lambda otpk: {
"id" : otpk["id"],
"key" : backend.encodePublicKey(otpk["key"], "25519")
}, self.otpks))
}
@property
def ik(self):
return self.__ik
@property
def spk(self):
return self.__spk
@property
def spk_signature(self):
return self.__spk_signature
@property
def otpks(self):
return self.__otpks
def findOTPKId(self, otpk):
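# look up the id of the given one-time pre key by matching its key material
# against the keys stored in this bundle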
otpks = list(filter(lambda x: x["key"] == otpk, self.otpks))
if len(otpks) != 1:
raise UnknownKeyException("Tried to get the id of an unknown OTPK.")
return otpks[0]["id"]
def findSPKId(self, spk):
# If the requested spk is the one contained in this bundle...
if self.spk["key"] == spk:
# ...return the id
return self.spk["id"]
raise UnknownKeyException("Tried to get the id of an unknown SPK.")
def __eq__(self, other):
try:
return (
self.ik == other.ik and
self.spk == other.spk and
self.spk_signature == other.spk_signature and
self.otpks == other.otpks
)
except AttributeError:
return False
|
[
"copy.deepcopy"
] |
[((739, 757), 'copy.deepcopy', 'copy.deepcopy', (['spk'], {}), '(spk)\n', (752, 757), False, 'import copy\n'), ((826, 846), 'copy.deepcopy', 'copy.deepcopy', (['otpks'], {}), '(otpks)\n', (839, 846), False, 'import copy\n')]
|
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from urlparse import urljoin
import csv
import time
import threading
import random
import string
import re
# 3rd party
import requests
import json
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
@attr(requires='couch')
@attr(couch_version='1.x')
class CouchTestCase(AgentCheckTest):
CHECK_NAME = 'couch'
# Publicly readable databases
DB_NAMES = ['_replicator', '_users', 'kennel']
GLOBAL_GAUGES = [
'couchdb.couchdb.auth_cache_hits',
'couchdb.couchdb.auth_cache_misses',
'couchdb.httpd.requests',
'couchdb.httpd_request_methods.GET',
'couchdb.httpd_request_methods.PUT',
'couchdb.couchdb.request_time',
'couchdb.couchdb.open_os_files',
'couchdb.couchdb.open_databases',
'couchdb.httpd_status_codes.200',
'couchdb.httpd_status_codes.201',
'couchdb.httpd_status_codes.400',
'couchdb.httpd_status_codes.401',
'couchdb.httpd_status_codes.404',
]
CHECK_GAUGES = [
'couchdb.by_db.disk_size',
'couchdb.by_db.doc_count',
]
def __init__(self, *args, **kwargs):
AgentCheckTest.__init__(self, *args, **kwargs)
self.config = {"instances": [{"server": "http://localhost:5984"}]}
def test_couch(self):
self.run_check(self.config)
# Metrics should have been emitted for any publicly readable databases.
for db_name in self.DB_NAMES:
tags = ['instance:http://localhost:5984', 'db:{0}'.format(db_name)]
for gauge in self.CHECK_GAUGES:
self.assertMetric(gauge, tags=tags, count=1)
# Check global metrics
for gauge in self.GLOBAL_GAUGES:
tags = ['instance:http://localhost:5984']
self.assertMetric(gauge, tags=tags, at_least=0)
self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
status=AgentCheck.OK,
tags=['instance:http://localhost:5984'],
count=2) # One per DB + one to get the version
self.coverage_report()
def test_bad_config(self):
self.assertRaises(
Exception,
lambda: self.run_check({"instances": [{"server": "http://localhost:5985"}]})
)
self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
status=AgentCheck.CRITICAL,
tags=['instance:http://localhost:5985'],
count=1)
def test_couch_whitelist(self):
DB_WHITELIST = ["_users"]
self.config['instances'][0]['db_whitelist'] = DB_WHITELIST
self.run_check(self.config)
for db_name in self.DB_NAMES:
tags = ['instance:http://localhost:5984', 'db:{0}'.format(db_name)]
for gauge in self.CHECK_GAUGES:
if db_name in DB_WHITELIST:
self.assertMetric(gauge, tags=tags, count=1)
else:
self.assertMetric(gauge, tags=tags, count=0)
def test_couch_blacklist(self):
DB_BLACKLIST = ["_replicator"]
self.config['instances'][0]['db_blacklist'] = DB_BLACKLIST
self.run_check(self.config)
for db_name in self.DB_NAMES:
tags = ['instance:http://localhost:5984', 'db:{0}'.format(db_name)]
for gauge in self.CHECK_GAUGES:
if db_name in DB_BLACKLIST:
self.assertMetric(gauge, tags=tags, count=0)
else:
self.assertMetric(gauge, tags=tags, count=1)
def test_only_max_nodes_are_scanned(self):
self.config['instances'][0]['max_dbs_per_check'] = 1
self.run_check(self.config)
for db_name in self.DB_NAMES[1:]:
tags = ['instance:http://localhost:5984', 'db:{0}'.format(db_name)]
for gauge in self.CHECK_GAUGES:
self.assertMetric(gauge, tags=tags, count=0)
@attr(requires='couch')
@attr(couch_version='2.x')
class TestCouchdb2(AgentCheckTest):
"""Basic Test for couchdb2 integration."""
CHECK_NAME = 'couch'
NODE1 = {
'server': 'http://127.0.0.1:5984',
'user': 'dduser',
'password': '<PASSWORD>',
'name': 'node1@127.0.0.1'
}
NODE2 = {
'server': 'http://127.0.0.1:5984',
'user': 'dduser',
'password': '<PASSWORD>',
'name': 'node2@127.0.0.1'
}
NODE3 = {
'server': 'http://127.0.0.1:5984',
'user': 'dduser',
'password': '<PASSWORD>',
'name': 'node3@127.0.0.1'
}
def __init__(self, *args, **kwargs):
AgentCheckTest.__init__(self, *args, **kwargs)
self.cluster_gauges = []
self.by_db_gauges = []
self.erlang_gauges = []
self.replication_tasks_gauges = []
self.compaction_tasks_gauges = []
self.indexing_tasks_gauges = []
self.view_compaction_tasks_gauges = []
self.by_dd_gauges = []
with open('couch/metadata.csv', 'rb') as csvfile:
reader = csv.reader(csvfile)
reader.next() # This one skips the headers
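            # Sort each metric listed in metadata.csv into the matching gauge bucket by name prefix.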
for row in reader:
if row[0] in ['couchdb.couchdb.request_time', 'couchdb.by_db.disk_size']:
# Skip CouchDB 1.x specific metrics
continue
elif row[0].startswith("couchdb.by_db."):
self.by_db_gauges.append(row[0])
elif row[0].startswith("couchdb.erlang"):
self.erlang_gauges.append(row[0])
elif row[0] in ['couchdb.active_tasks.replication.count', 'couchdb.active_tasks.db_compaction.count', 'couchdb.active_tasks.indexer.count', 'couchdb.active_tasks.view_compaction.count']:
self.cluster_gauges.append(row[0])
elif row[0].startswith("couchdb.active_tasks.replication"):
self.replication_tasks_gauges.append(row[0])
elif row[0].startswith("couchdb.active_tasks.db_compaction"):
self.compaction_tasks_gauges.append(row[0])
elif row[0].startswith("couchdb.active_tasks.indexer"):
self.indexing_tasks_gauges.append(row[0])
elif row[0].startswith("couchdb.active_tasks.view_compaction"):
self.view_compaction_tasks_gauges.append(row[0])
elif row[0].startswith("couchdb.by_ddoc."):
self.by_dd_gauges.append(row[0])
else:
self.cluster_gauges.append(row[0])
def test_check(self):
"""
Testing Couchdb2 check.
"""
self.run_check({"instances": [self.NODE1, self.NODE2, self.NODE3]})
tags = map(lambda n: "instance:{0}".format(n['name']), [self.NODE1, self.NODE2, self.NODE3])
for tag in tags:
for gauge in self.cluster_gauges:
self.assertMetric(gauge, tags=[tag])
for db in ['_users', '_global_changes', '_metadata', '_replicator', 'kennel']:
for gauge in self.by_db_gauges:
self.assertMetric(gauge, tags=[tag, "db:{0}".format(db)])
for gauge in self.erlang_gauges:
self.assertMetric(gauge)
for db, dd in {"kennel": "dummy", "_replicator": "_replicator", "_users": "_auth"}.items():
for gauge in self.by_dd_gauges:
self.assertMetric(gauge, tags=[tag, "design_document:{0}".format(dd), "language:javascript", "db:{0}".format(db)])
self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
status=AgentCheck.OK,
tags=["instance:{0}".format(self.NODE1["name"])],
count=2) # One for the version, one for the server stats
for node in [self.NODE2, self.NODE3]:
self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
status=AgentCheck.OK,
tags=["instance:{0}".format(node["name"])],
count=1) # One for the server stats, the version is already loaded
# Raises when COVERAGE=true and coverage < 100%
self.coverage_report()
def test_bad_config(self):
conf = self.NODE1.copy()
conf.pop('server')
self.assertRaises(
Exception,
lambda: self.run_check({"instances": [conf]})
)
def test_wrong_config(self):
conf = self.NODE1.copy()
conf['server'] = "http://127.0.0.1:11111"
self.assertRaises(
Exception,
lambda: self.run_check({"instances": [conf]})
)
self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
status=AgentCheck.CRITICAL,
tags=["instance:{0}".format(conf['name'])],
count=1)
def test_db_whitelisting(self):
confs = []
for n in [self.NODE1, self.NODE2, self.NODE3]:
node = n.copy()
node['db_whitelist'] = ['kennel']
confs.append(node)
self.run_check({"instances": confs})
for n in confs:
for db in ['_users', '_global_changes', '_metadata', '_replicator']:
tags = ["instance:{0}".format(n['name']), "db:{0}".format(db)]
for gauge in self.by_db_gauges:
self.assertMetric(gauge, tags=tags, count=0)
tags = ["instance:{0}".format(n['name']), 'db:kennel']
for gauge in self.by_db_gauges:
self.assertMetric(gauge, tags=tags)
def test_db_blacklisting(self):
confs = []
for n in [self.NODE1, self.NODE2, self.NODE3]:
node = n.copy()
node['db_blacklist'] = ['kennel']
confs.append(node)
self.run_check({"instances": confs})
for n in confs:
for db in ['_users', '_global_changes', '_metadata', '_replicator']:
tags = ["instance:{0}".format(n['name']), "db:{0}".format(db)]
for gauge in self.by_db_gauges:
self.assertMetric(gauge, tags=tags)
tags = ["instance:{0}".format(n['name']), 'db:kennel']
for gauge in self.by_db_gauges:
self.assertMetric(gauge, tags=tags, count=0)
def test_check_without_names(self):
conf = self.NODE1.copy()
conf.pop('name')
self.run_check({"instances": [conf]})
tags = map(lambda n: "instance:{0}".format(n['name']), [self.NODE1, self.NODE2, self.NODE3])
for tag in tags:
for gauge in self.cluster_gauges:
self.assertMetric(gauge, tags=[tag])
for db in ['_users', '_global_changes', '_metadata', '_replicator', 'kennel']:
tags = [tag, "db:{0}".format(db)]
for gauge in self.by_db_gauges:
self.assertMetric(gauge, tags=tags)
for gauge in self.erlang_gauges:
self.assertMetric(gauge)
for db, dd in {"kennel": "dummy", "_replicator": "_replicator", "_users": "_auth"}.items():
for gauge in self.by_dd_gauges:
self.assertMetric(gauge, tags=[tag, "design_document:{0}".format(dd), "language:javascript", "db:{0}".format(db)])
self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
status=AgentCheck.OK,
tags=["instance:{0}".format(conf["server"])],
count=1) # One for the version as we don't have any names to begin with
for node in [self.NODE1, self.NODE2, self.NODE3]:
self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
status=AgentCheck.OK,
tags=["instance:{0}".format(node["name"])],
count=1) # One for the server stats, the version is already loaded
# Raises when COVERAGE=true and coverage < 100%
self.coverage_report()
def test_only_max_nodes_are_scanned(self):
conf = self.NODE1.copy()
conf.pop('name')
conf['max_nodes_per_check'] = 2
self.run_check({"instances": [conf]})
for gauge in self.erlang_gauges:
self.assertMetric(gauge)
tags = map(lambda n: "instance:{0}".format(n['name']), [self.NODE1, self.NODE2])
for tag in tags:
for gauge in self.cluster_gauges:
self.assertMetric(gauge, tags=[tag])
for db in ['_users', '_global_changes', '_metadata', '_replicator', 'kennel']:
tags = [tag, "db:{0}".format(db)]
for gauge in self.by_db_gauges:
self.assertMetric(gauge, tags=tags)
for db, dd in {"kennel": "dummy", "_replicator": "_replicator", "_users": "_auth"}.items():
for gauge in self.by_dd_gauges:
self.assertMetric(gauge, tags=[tag, "design_document:{0}".format(dd), "language:javascript", "db:{0}".format(db)])
self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
status=AgentCheck.OK,
tags=["instance:{0}".format(conf["server"])],
count=1) # One for the version as we don't have any names to begin with
for node in [self.NODE1, self.NODE2]:
self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
status=AgentCheck.OK,
tags=["instance:{0}".format(node["name"])],
count=1) # One for the server stats, the version is already loaded
tags = ["instance:{0}".format(self.NODE3['name'])]
for gauge in self.cluster_gauges:
self.assertMetric(gauge, tags=tags, count=0)
for db in ['_users', '_global_changes', '_metadata', '_replicator', 'kennel']:
tags = [tags[0], "db:{0}".format(db)]
for gauge in self.by_db_gauges:
self.assertMetric(gauge, tags=tags, count=0)
self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
status=AgentCheck.OK,
tags=tags,
count=0)
# Raises when COVERAGE=true and coverage < 100%
self.coverage_report()
def test_only_max_dbs_are_scanned(self):
confs = []
for n in [self.NODE1, self.NODE2, self.NODE3]:
node = n.copy()
node['max_dbs_per_check'] = 1
confs.append(node)
self.run_check({"instances": confs})
for n in confs:
for db in ['kennel', '_users', '_metadata', '_replicator']:
tags = ["instance:{0}".format(n['name']), "db:{0}".format(db)]
for gauge in self.by_db_gauges:
self.assertMetric(gauge, tags=tags, count=0)
tags = ["instance:{0}".format(n['name']), 'db:_global_changes']
for gauge in self.by_db_gauges:
self.assertMetric(gauge, tags=tags, count=1)
def test_replication_metrics(self):
url = self.NODE1['server'] + '/_replicator'
replication_body = {
'_id': 'my_replication_id',
'source': 'http://dduser:pawprint@127.0.0.1:5984/kennel',
'target': 'http://dduser:pawprint@127.0.0.1:5984/kennel_replica',
'create_target': True,
'continuous': True
}
r = requests.post(url, auth=(self.NODE1['user'], self.NODE1['password']), headers={'Content-Type': 'application/json'}, data=json.dumps(replication_body))
r.raise_for_status()
count = 0
attempts = 0
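        # Poll the _active_tasks endpoint until the replication task appears, giving up after 20 attempts.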
while count != 1 and attempts < 20:
attempts += 1
time.sleep(1)
r = requests.get(self.NODE1['server'] + '/_active_tasks', auth=(self.NODE1['user'], self.NODE1['password']))
r.raise_for_status()
count = len(r.json())
self.run_check({"instances": [self.NODE1, self.NODE2, self.NODE3]})
for gauge in self.replication_tasks_gauges:
self.assertMetric(gauge)
def test_compaction_metrics(self):
url = urljoin(self.NODE1['server'], 'kennel')
body = {
'_id': 'fsdr2345fgwert249i9fg9drgsf4SDFGWE',
'data': str(time.time())
}
r = requests.post(url, auth=(self.NODE1['user'], self.NODE1['password']), headers={'Content-Type': 'application/json'}, data=json.dumps(body))
r.raise_for_status()
update_url = urljoin(self.NODE1['server'], 'kennel/{0}'.format(body['_id']))
for _ in xrange(50):
rev = r.json()['rev']
body['data'] = str(time.time())
body['_rev'] = rev
r = requests.put(update_url, auth=(self.NODE1['user'], self.NODE1['password']), headers={'Content-Type': 'application/json'}, data=json.dumps(body))
r.raise_for_status()
r2 = requests.post(url, auth=(self.NODE1['user'], self.NODE1['password']), headers={'Content-Type': 'application/json'}, data=json.dumps({"_id": str(time.time())}))
r2.raise_for_status()
url = urljoin(self.NODE1['server'], 'kennel/_compact')
r = requests.post(url, auth=(self.NODE1['user'], self.NODE1['password']), headers={'Content-Type': 'application/json'})
r.raise_for_status()
url = urljoin(self.NODE1['server'], '_global_changes/_compact')
r = requests.post(url, auth=(self.NODE1['user'], self.NODE1['password']), headers={'Content-Type': 'application/json'})
r.raise_for_status()
self.run_check({"instances": [self.NODE1, self.NODE2, self.NODE3]})
for gauge in self.compaction_tasks_gauges:
self.assertMetric(gauge)
def test_indexing_metrics(self):
url = urljoin(self.NODE1['server'], 'kennel')
for _ in xrange(50):
r = requests.post(url, auth=(self.NODE1['user'], self.NODE1['password']), headers={'Content-Type': 'application/json'}, data=json.dumps({"_id": str(time.time())}))
r.raise_for_status()
class AsyncReq(threading.Thread):
def __init__(self, url, auth):
self._url = url
self._auth = auth
threading.Thread.__init__(self)
def run(self):
r = requests.get(self._url, auth=self._auth)
r.raise_for_status()
url = urljoin(self.NODE1['server'], 'kennel/_design/dummy/_view/all')
t = AsyncReq(url, (self.NODE1['user'], self.NODE1['password']))
t.start()
self.run_check({"instances": [self.NODE1, self.NODE2, self.NODE3]})
t.join()
for node in [self.NODE1, self.NODE2, self.NODE3]:
for gauge in self.indexing_tasks_gauges:
self.assertMetric(gauge, tags=['database:kennel', 'design_document:dummy', 'instance:{0}'.format(node['name'])])
def test_view_compaction_metrics(self):
class LoadGenerator(threading.Thread):
STOP = 0
RUN = 1
def __init__(self, server, auth):
self._server = server
self._auth = auth
self._status = self.RUN
threading.Thread.__init__(self)
def run(self):
docs = []
count = 0
while self._status == self.RUN:
count += 1
if count % 5 == 0:
self.compact_views()
theid = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
docs.append(self.post_doc(theid))
docs = map(lambda x: self.update_doc(x), docs)
self.generate_views()
def generate_views(self):
url = urljoin(self._server, 'kennel/_design/dummy/_view/all')
try:
r = requests.get(url, auth=self._auth, timeout=1)
r.raise_for_status()
except requests.exceptions.Timeout:
                    pass
url = urljoin(self._server, 'kennel/_design/dummy/_view/by_data')
try:
r = requests.get(url, auth=self._auth, timeout=1)
r.raise_for_status()
except requests.exceptions.Timeout:
                    pass
def update_doc(self, doc):
body = {
'data': str(random.randint(0, 1000000000)),
'_rev': doc['rev']
}
r = requests.put(urljoin(self._server, 'kennel/{0}'.format(doc['id'])), auth=self._auth, headers={'Content-Type': 'application/json'}, data=json.dumps(body))
r.raise_for_status()
return r.json()
def post_doc(self, doc_id):
body = {
"_id": doc_id,
"data": str(time.time())
}
r = requests.post(urljoin(self._server, 'kennel'), auth=self._auth, headers={'Content-Type': 'application/json'}, data=json.dumps(body))
r.raise_for_status()
return r.json()
def compact_views(self):
url = urljoin(self._server, 'kennel/_compact/dummy')
r = requests.post(url, auth=self._auth, headers={'Content-Type': 'application/json'})
r.raise_for_status()
def stop(self):
self._status = self.STOP
threads = []
for _ in range(40):
t = LoadGenerator(self.NODE1['server'], (self.NODE1['user'], self.NODE1['password']))
t.start()
threads.append(t)
tries = 0
try:
metric_found = False
while not metric_found and tries < 20:
tries += 1
self.run_check({"instances": [self.NODE1, self.NODE2, self.NODE3]})
for m_name, ts, val, mdata in self.metrics:
if re.search('view_compaction\.progress', m_name) is not None:
metric_found = True
for gauge in self.view_compaction_tasks_gauges:
self.assertMetric(gauge)
break
finally:
for t in threads:
t.stop()
for t in threads:
t.join()
if tries >= 20:
self.fail("Could not find the view_compaction happening")
|
[
"threading.Thread.__init__",
"csv.reader",
"random.randint",
"random.choice",
"json.dumps",
"time.sleep",
"urlparse.urljoin",
"time.time",
"requests.get",
"re.search",
"requests.post",
"nose.plugins.attrib.attr",
"tests.checks.common.AgentCheckTest.__init__"
] |
[((392, 414), 'nose.plugins.attrib.attr', 'attr', ([], {'requires': '"""couch"""'}), "(requires='couch')\n", (396, 414), False, 'from nose.plugins.attrib import attr\n'), ((416, 441), 'nose.plugins.attrib.attr', 'attr', ([], {'couch_version': '"""1.x"""'}), "(couch_version='1.x')\n", (420, 441), False, 'from nose.plugins.attrib import attr\n'), ((4153, 4175), 'nose.plugins.attrib.attr', 'attr', ([], {'requires': '"""couch"""'}), "(requires='couch')\n", (4157, 4175), False, 'from nose.plugins.attrib import attr\n'), ((4177, 4202), 'nose.plugins.attrib.attr', 'attr', ([], {'couch_version': '"""2.x"""'}), "(couch_version='2.x')\n", (4181, 4202), False, 'from nose.plugins.attrib import attr\n'), ((1313, 1359), 'tests.checks.common.AgentCheckTest.__init__', 'AgentCheckTest.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (1336, 1359), False, 'from tests.checks.common import AgentCheckTest\n'), ((4835, 4881), 'tests.checks.common.AgentCheckTest.__init__', 'AgentCheckTest.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (4858, 4881), False, 'from tests.checks.common import AgentCheckTest\n'), ((16605, 16644), 'urlparse.urljoin', 'urljoin', (["self.NODE1['server']", '"""kennel"""'], {}), "(self.NODE1['server'], 'kennel')\n", (16612, 16644), False, 'from urlparse import urljoin\n'), ((17592, 17640), 'urlparse.urljoin', 'urljoin', (["self.NODE1['server']", '"""kennel/_compact"""'], {}), "(self.NODE1['server'], 'kennel/_compact')\n", (17599, 17640), False, 'from urlparse import urljoin\n'), ((17653, 17772), 'requests.post', 'requests.post', (['url'], {'auth': "(self.NODE1['user'], self.NODE1['password'])", 'headers': "{'Content-Type': 'application/json'}"}), "(url, auth=(self.NODE1['user'], self.NODE1['password']),\n headers={'Content-Type': 'application/json'})\n", (17666, 17772), False, 'import requests\n'), ((17813, 17870), 'urlparse.urljoin', 'urljoin', (["self.NODE1['server']", '"""_global_changes/_compact"""'], {}), "(self.NODE1['server'], '_global_changes/_compact')\n", (17820, 17870), False, 'from urlparse import urljoin\n'), ((17883, 18002), 'requests.post', 'requests.post', (['url'], {'auth': "(self.NODE1['user'], self.NODE1['password'])", 'headers': "{'Content-Type': 'application/json'}"}), "(url, auth=(self.NODE1['user'], self.NODE1['password']),\n headers={'Content-Type': 'application/json'})\n", (17896, 18002), False, 'import requests\n'), ((18246, 18285), 'urlparse.urljoin', 'urljoin', (["self.NODE1['server']", '"""kennel"""'], {}), "(self.NODE1['server'], 'kennel')\n", (18253, 18285), False, 'from urlparse import urljoin\n'), ((18865, 18928), 'urlparse.urljoin', 'urljoin', (["self.NODE1['server']", '"""kennel/_design/dummy/_view/all"""'], {}), "(self.NODE1['server'], 'kennel/_design/dummy/_view/all')\n", (18872, 18928), False, 'from urlparse import urljoin\n'), ((5260, 5279), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (5270, 5279), False, 'import csv\n'), ((16182, 16195), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (16192, 16195), False, 'import time\n'), ((16212, 16321), 'requests.get', 'requests.get', (["(self.NODE1['server'] + '/_active_tasks')"], {'auth': "(self.NODE1['user'], self.NODE1['password'])"}), "(self.NODE1['server'] + '/_active_tasks', auth=(self.NODE1[\n 'user'], self.NODE1['password']))\n", (16224, 16321), False, 'import requests\n'), ((16001, 16029), 'json.dumps', 'json.dumps', (['replication_body'], {}), '(replication_body)\n', (16011, 16029), False, 'import json\n'), ((16743, 16754), 'time.time', 
'time.time', ([], {}), '()\n', (16752, 16754), False, 'import time\n'), ((16899, 16915), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (16909, 16915), False, 'import json\n'), ((17127, 17138), 'time.time', 'time.time', ([], {}), '()\n', (17136, 17138), False, 'import time\n'), ((18692, 18723), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (18717, 18723), False, 'import threading\n'), ((18772, 18812), 'requests.get', 'requests.get', (['self._url'], {'auth': 'self._auth'}), '(self._url, auth=self._auth)\n', (18784, 18812), False, 'import requests\n'), ((19663, 19694), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (19688, 19694), False, 'import threading\n'), ((20272, 20327), 'urlparse.urljoin', 'urljoin', (['self._server', '"""kennel/_design/dummy/_view/all"""'], {}), "(self._server, 'kennel/_design/dummy/_view/all')\n", (20279, 20327), False, 'from urlparse import urljoin\n'), ((20559, 20618), 'urlparse.urljoin', 'urljoin', (['self._server', '"""kennel/_design/dummy/_view/by_data"""'], {}), "(self._server, 'kennel/_design/dummy/_view/by_data')\n", (20566, 20618), False, 'from urlparse import urljoin\n'), ((21704, 21750), 'urlparse.urljoin', 'urljoin', (['self._server', '"""kennel/_compact/dummy"""'], {}), "(self._server, 'kennel/_compact/dummy')\n", (21711, 21750), False, 'from urlparse import urljoin\n'), ((21771, 21856), 'requests.post', 'requests.post', (['url'], {'auth': 'self._auth', 'headers': "{'Content-Type': 'application/json'}"}), "(url, auth=self._auth, headers={'Content-Type':\n 'application/json'})\n", (21784, 21856), False, 'import requests\n'), ((17314, 17330), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (17324, 17330), False, 'import json\n'), ((20373, 20418), 'requests.get', 'requests.get', (['url'], {'auth': 'self._auth', 'timeout': '(1)'}), '(url, auth=self._auth, timeout=1)\n', (20385, 20418), False, 'import requests\n'), ((20664, 20709), 'requests.get', 'requests.get', (['url'], {'auth': 'self._auth', 'timeout': '(1)'}), '(url, auth=self._auth, timeout=1)\n', (20676, 20709), False, 'import requests\n'), ((21456, 21487), 'urlparse.urljoin', 'urljoin', (['self._server', '"""kennel"""'], {}), "(self._server, 'kennel')\n", (21463, 21487), False, 'from urlparse import urljoin\n'), ((20925, 20954), 'random.randint', 'random.randint', (['(0)', '(1000000000)'], {}), '(0, 1000000000)\n', (20939, 20954), False, 'import random\n'), ((21171, 21187), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (21181, 21187), False, 'import json\n'), ((21391, 21402), 'time.time', 'time.time', ([], {}), '()\n', (21400, 21402), False, 'import time\n'), ((21557, 21573), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (21567, 21573), False, 'import json\n'), ((22471, 22518), 're.search', 're.search', (['"""view_compaction\\\\.progress"""', 'm_name'], {}), "('view_compaction\\\\.progress', m_name)\n", (22480, 22518), False, 'import re\n'), ((19974, 20027), 'random.choice', 'random.choice', (['(string.ascii_uppercase + string.digits)'], {}), '(string.ascii_uppercase + string.digits)\n', (19987, 20027), False, 'import random\n'), ((17527, 17538), 'time.time', 'time.time', ([], {}), '()\n', (17536, 17538), False, 'import time\n'), ((18475, 18486), 'time.time', 'time.time', ([], {}), '()\n', (18484, 18486), False, 'import time\n')]
|
#!/usr/bin/env python3
# Copyright (C) 2012-2018 <NAME> <<EMAIL>>
# Copyright (C) 2018 <NAME> <<EMAIL>>
# Copyright (C) 2018 Electrum Technologies GmbH
#
# This file is licensed under the MIT license. See LICENCE file for more information.
#
import math
import re
from typing import Callable, Any
from .uikit_bindings import *
from . import utils
from .custom_objc import *
from electrum.i18n import _
from electrum import WalletStorage, Wallet
def Create_SeedDisplayVC(seed: str, passphrase: str) -> ObjCInstance:
ret = SeedDisplayVC.seedDisplayVCWithSeed_passphrase_(ns_from_py(seed), ns_from_py(passphrase))
# utils.add_callback(ret, 'okcallback', callback)
return ret
class SeedDisplayVC(SeedDisplayBase):
@objc_classmethod
def seedDisplayVCWithSeed_passphrase_(cls: ObjCInstance, seed: ObjCInstance,
passphrase: ObjCInstance) -> ObjCInstance:
ret = SeedDisplayVC.new().autorelease()
ret.seed = seed
ret.passphrase = passphrase
ret.modalPresentationStyle = UIModalPresentationOverFullScreen # UIModalPresentationOverCurrentContext
ret.modalTransitionStyle = UIModalTransitionStyleCrossDissolve
# ret.disablesAutomaticKeyboardDismissal = False #NB: this caused an app crash due to missing selector in some iOS! DO NOT USE!
return ret
@objc_method
def dealloc(self) -> None:
utils.remove_all_callbacks(self)
send_super(__class__, self, 'dealloc')
@objc_method
def loadView(self) -> None:
NSBundle.mainBundle.loadNibNamed_owner_options_("SeedDialog", self, None)
f = self.contentView.frame
sv = UIScrollView.alloc().initWithFrame_(CGRectMake(0, 0, f.size.width, f.size.height)).autorelease()
sv.contentSize = CGSizeMake(f.size.width, f.size.height)
sv.backgroundColor = UIColor.colorWithRed_green_blue_alpha_(0., 0., 0., 0.3)
sv.opaque = False
sv.addSubview_(self.contentView)
self.view = sv
@objc_method
def viewWillAppear_(self, animated: bool) -> None:
send_super(__class__, self, 'viewWillAppear:', animated, argtypes=[c_bool])
seed = py_from_ns(self.seed)
passphrase = py_from_ns(self.passphrase)
self.seedTit.setText_withKerning_(_("Your wallet generation seed is:"), utils._kern)
self.extTit.setText_withKerning_(_("Your seed extension is") + ":", utils._kern)
f1 = UIFont.systemFontOfSize_weight_(16.0, UIFontWeightBold)
utils.uilabel_replace_attributed_text(self.seedLbl, seed, font=f1)
utils.uilabel_replace_attributed_text(self.extLbl, passphrase or '', font=f1)
utils.uilabel_replace_attributed_text(self.blurb,
(_("Please save these %d words on paper (order is important). ") % (
len(seed.split()) + (
len(passphrase.split()) if passphrase else 0)))
+ _(
"This seed will allow you to recover your wallet in case of computer failure."),
font=UIFont.italicSystemFontOfSize_(14.0))
self.okBut.setTitle_forState_(_("OK"), UIControlStateNormal)
self.warnTit.text = _("WARNING")
self.warn1.text = _("Never disclose your seed.")
self.warn3.text = _("Never type it on a website.")
self.warn2.text = _("Do not store it electronically.")
if not passphrase:
self.extTit.setHidden_(True)
self.extLbl.setHidden_(True)
self.csBlurbTop.constant = -40.0
self.csBlurbHeight.constant = 80.0
self.csBlurbBot.constant = 40.0
self.csOkButHeight.constant = 40.0
self.csTitTop.constant = 20.0
else:
self.extTit.setHidden_(False)
self.extLbl.setHidden_(False)
self.csBlurbTop.constant = 20.0
self.csBlurbBot.constant = 20.0
self.csBlurbHeight.constant = 60.0
self.csOkButHeight.constant = 30.0
self.csTitTop.constant = 10.0
@objc_method
def onCopyBut_(self, but) -> None:
data = str(self.seed)
if self.passphrase: data += " / " + str(self.passphrase)
UIPasteboard.generalPasteboard.string = data
utils.show_notification(message=_("Text copied to clipboard"))
@objc_method
def onOk_(self, sender) -> None:
self.presentingViewController.dismissViewControllerAnimated_completion_(True, None)
@objc_method
def onQRBut_(self, but) -> None:
data = self.seed
if self.passphrase: data += " / " + self.passphrase
qrvc = utils.present_qrcode_vc_for_data(vc=self,
data=data,
title=_('Wallet Seed'))
closeButton = UIBarButtonItem.alloc().initWithBarButtonSystemItem_target_action_(UIBarButtonSystemItemStop,
self, SEL(
b'onModalClose:')).autorelease()
qrvc.navigationItem.leftBarButtonItem = closeButton
@objc_method
def onModalClose_(self, but: ObjCInstance) -> None:
self.dismissViewControllerAnimated_completion_(True, None)
@objc_method
def onSeedLblTap_(self, uigr: ObjCInstance) -> None:
ipadAnchor = None
if utils.is_ipad():
ipadAnchor = self.seedLbl.bounds
ipadAnchor = self.seedLbl.convertRect_toView_(ipadAnchor, self.view)
utils.show_alert(
vc=self,
title=_("Options"),
message=_("Wallet Seed"),
actions=[
[_('Cancel')],
[_('Copy to clipboard'), self.onCopyBut_, None],
[_('Show as QR code'), self.onQRBut_, None],
],
cancel=_('Cancel'),
style=UIAlertControllerStyleActionSheet,
ipadAnchor=ipadAnchor
)
'''
def seed_warning_msg(seed, passphrase):
return ''.join([
'<font face="Verdana, Arial, Helvetica" color=#414141>',
"<p>",
str(_("Your seed extension is") + ": <b>" + passphrase + "</b></p><p>") if passphrase else '',
_("Please save these %d words on paper (order is important). "),
_("This seed will allow you to recover your wallet in case "
"of computer failure."),
"</p>",
'<p>',
"<b>" + _("WARNING") + ":</b>",
"<ul>",
"<li>" + _("Never disclose your seed.") + "</li>",
"<li>" + _("Never type it on a website.") + "</li>",
"<li>" + _("Do not store it electronically.") + "</li>",
"</ul>",
'</p>',
'</font>',
]) % len(seed.split())
'''
|
[
"electrum.i18n._"
] |
[((3397, 3409), 'electrum.i18n._', '_', (['"""WARNING"""'], {}), "('WARNING')\n", (3398, 3409), False, 'from electrum.i18n import _\n'), ((3436, 3466), 'electrum.i18n._', '_', (['"""Never disclose your seed."""'], {}), "('Never disclose your seed.')\n", (3437, 3466), False, 'from electrum.i18n import _\n'), ((3493, 3525), 'electrum.i18n._', '_', (['"""Never type it on a website."""'], {}), "('Never type it on a website.')\n", (3494, 3525), False, 'from electrum.i18n import _\n'), ((3552, 3588), 'electrum.i18n._', '_', (['"""Do not store it electronically."""'], {}), "('Do not store it electronically.')\n", (3553, 3588), False, 'from electrum.i18n import _\n'), ((2304, 2340), 'electrum.i18n._', '_', (['"""Your wallet generation seed is:"""'], {}), "('Your wallet generation seed is:')\n", (2305, 2340), False, 'from electrum.i18n import _\n'), ((3338, 3345), 'electrum.i18n._', '_', (['"""OK"""'], {}), "('OK')\n", (3339, 3345), False, 'from electrum.i18n import _\n'), ((2396, 2423), 'electrum.i18n._', '_', (['"""Your seed extension is"""'], {}), "('Your seed extension is')\n", (2397, 2423), False, 'from electrum.i18n import _\n'), ((3077, 3163), 'electrum.i18n._', '_', (['"""This seed will allow you to recover your wallet in case of computer failure."""'], {}), "('This seed will allow you to recover your wallet in case of computer failure.'\n )\n", (3078, 3163), False, 'from electrum.i18n import _\n'), ((4490, 4519), 'electrum.i18n._', '_', (['"""Text copied to clipboard"""'], {}), "('Text copied to clipboard')\n", (4491, 4519), False, 'from electrum.i18n import _\n'), ((4978, 4994), 'electrum.i18n._', '_', (['"""Wallet Seed"""'], {}), "('Wallet Seed')\n", (4979, 4994), False, 'from electrum.i18n import _\n'), ((5783, 5795), 'electrum.i18n._', '_', (['"""Options"""'], {}), "('Options')\n", (5784, 5795), False, 'from electrum.i18n import _\n'), ((5817, 5833), 'electrum.i18n._', '_', (['"""Wallet Seed"""'], {}), "('Wallet Seed')\n", (5818, 5833), False, 'from electrum.i18n import _\n'), ((6048, 6059), 'electrum.i18n._', '_', (['"""Cancel"""'], {}), "('Cancel')\n", (6049, 6059), False, 'from electrum.i18n import _\n'), ((2779, 2842), 'electrum.i18n._', '_', (['"""Please save these %d words on paper (order is important). """'], {}), "('Please save these %d words on paper (order is important). ')\n", (2780, 2842), False, 'from electrum.i18n import _\n'), ((5874, 5885), 'electrum.i18n._', '_', (['"""Cancel"""'], {}), "('Cancel')\n", (5875, 5885), False, 'from electrum.i18n import _\n'), ((5905, 5927), 'electrum.i18n._', '_', (['"""Copy to clipboard"""'], {}), "('Copy to clipboard')\n", (5906, 5927), False, 'from electrum.i18n import _\n'), ((5970, 5990), 'electrum.i18n._', '_', (['"""Show as QR code"""'], {}), "('Show as QR code')\n", (5971, 5990), False, 'from electrum.i18n import _\n')]
|
from django.db.models import Q
from django.core.cache import cache, caches, InvalidCacheBackendError
from pt_law_parser import analyse, common_managers, observers, ObserverManager, \
from_json, html_toc
from law.models import Document, Type
PLURALS = {'Decreto-Lei': ['Decretos-Leis', 'Decretos-Lei'],
'Lei': ['Leis'],
'Portaria': ['Portarias']}
SINGULARS = {'Decretos-Leis': 'Decreto-Lei',
'Decretos-Lei': 'Decreto-Lei',
'Leis': 'Lei',
'Portarias': 'Portaria'}
def get_references(document, analysis=None):
if analysis is None:
analysis = text_analysis(document)
query = Q()
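    # OR together one lookup per (type name, number) reference found in the analysed text.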
for name, number in analysis.get_doc_refs():
type_name = name
if name in SINGULARS:
type_name = SINGULARS[name]
query |= Q(type__name=type_name, number=number)
return Document.objects.exclude(dr_series='II').filter(query)\
.exclude(id=document.id).prefetch_related('type')
def _text_analysis(document):
type_names = list(Type.objects.exclude(name__contains='(')
.exclude(dr_series='II').values_list('name', flat=True))
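    # Also match the plural forms (e.g. "Decretos-Leis") of the known document types.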
type_names += [plural for name in type_names if name in PLURALS
for plural in PLURALS[name]]
managers = common_managers + [
ObserverManager(dict((name, observers.DocumentRefObserver)
for name in type_names))]
terms = {' ', '.', ',', '\n', 'n.os', '«', '»'}
for manager in managers:
terms |= manager.terms
analysis = analyse(document.text, managers, terms)
docs = get_references(document, analysis)
mapping = {}
for doc in docs:
type_name = doc.type.name
if doc.type.name in PLURALS:
for plural in PLURALS[doc.type.name]:
mapping[(plural, doc.number)] = doc.get_absolute_url()
mapping[(type_name, doc.number)] = doc.get_absolute_url()
analysis.set_doc_refs(mapping)
return analysis
def text_analysis(document):
"""
Cached version of `_text_analysis`. Uses cache `law_analysis` to store
the result.
"""
# short-circuit if no caching present
try:
cache = caches['law_analysis']
except InvalidCacheBackendError:
return _text_analysis(document)
key = 'text_analysis>%d' % document.dre_doc_id
result = cache.get(key)
if result is None:
result = _text_analysis(document)
cache.set(key, result.as_json())
else:
result = from_json(result)
return result
def compose_all(document):
key = 'compose_all>%d>%d' % (1, document.dre_doc_id)
result = cache.get(key)
if result is None:
result = text_analysis(document)
result = (result.as_html(), html_toc(result).as_html())
cache.set(key, result)
return result
|
[
"law.models.Document.objects.exclude",
"pt_law_parser.analyse",
"django.core.cache.cache.set",
"django.db.models.Q",
"django.core.cache.cache.get",
"pt_law_parser.html_toc",
"law.models.Type.objects.exclude",
"pt_law_parser.from_json"
] |
[((660, 663), 'django.db.models.Q', 'Q', ([], {}), '()\n', (661, 663), False, 'from django.db.models import Q\n'), ((1567, 1606), 'pt_law_parser.analyse', 'analyse', (['document.text', 'managers', 'terms'], {}), '(document.text, managers, terms)\n', (1574, 1606), False, 'from pt_law_parser import analyse, common_managers, observers, ObserverManager, from_json, html_toc\n'), ((2378, 2392), 'django.core.cache.cache.get', 'cache.get', (['key'], {}), '(key)\n', (2387, 2392), False, 'from django.core.cache import cache, caches, InvalidCacheBackendError\n'), ((2662, 2676), 'django.core.cache.cache.get', 'cache.get', (['key'], {}), '(key)\n', (2671, 2676), False, 'from django.core.cache import cache, caches, InvalidCacheBackendError\n'), ((825, 863), 'django.db.models.Q', 'Q', ([], {'type__name': 'type_name', 'number': 'number'}), '(type__name=type_name, number=number)\n', (826, 863), False, 'from django.db.models import Q\n'), ((2526, 2543), 'pt_law_parser.from_json', 'from_json', (['result'], {}), '(result)\n', (2535, 2543), False, 'from pt_law_parser import analyse, common_managers, observers, ObserverManager, from_json, html_toc\n'), ((2813, 2835), 'django.core.cache.cache.set', 'cache.set', (['key', 'result'], {}), '(key, result)\n', (2822, 2835), False, 'from django.core.cache import cache, caches, InvalidCacheBackendError\n'), ((2777, 2793), 'pt_law_parser.html_toc', 'html_toc', (['result'], {}), '(result)\n', (2785, 2793), False, 'from pt_law_parser import analyse, common_managers, observers, ObserverManager, from_json, html_toc\n'), ((1044, 1084), 'law.models.Type.objects.exclude', 'Type.objects.exclude', ([], {'name__contains': '"""("""'}), "(name__contains='(')\n", (1064, 1084), False, 'from law.models import Document, Type\n'), ((876, 916), 'law.models.Document.objects.exclude', 'Document.objects.exclude', ([], {'dr_series': '"""II"""'}), "(dr_series='II')\n", (900, 916), False, 'from law.models import Document, Type\n')]
|
'''
Your job in this exercise is to compute the yearly percent-change of US GDP (Gross Domestic Product) since 2008.
The data has been obtained from the Federal Reserve Bank of St. Louis and is available in the file GDP.csv, which contains quarterly data; you will resample it to annual sampling and then compute the annual growth of GDP. For a refresher on resampling, check out the relevant material from pandas Foundations.
'''
import pandas as pd
# Read 'GDP.csv' into a DataFrame: gdp
gdp = pd.read_csv('GDP.csv', parse_dates=True, index_col='DATE')
# Slice all the gdp data from 2008 onward: post2008
post2008 = gdp[gdp.index >= '2008']
# Print the last 8 rows of post2008
print(post2008.tail(8))
# Resample post2008 by year, keeping last(): yearly
yearly = post2008.resample('A').last()
# Print yearly
print(yearly)
# Compute percentage growth of yearly: yearly['growth']
yearly['growth'] = yearly.pct_change() * 100
# Print yearly again
print(yearly)
|
[
"pandas.read_csv"
] |
[((498, 556), 'pandas.read_csv', 'pd.read_csv', (['"""GDP.csv"""'], {'parse_dates': '(True)', 'index_col': '"""DATE"""'}), "('GDP.csv', parse_dates=True, index_col='DATE')\n", (509, 556), True, 'import pandas as pd\n')]
|
from __future__ import annotations
import logging
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
import pygame
import pytweening
from scripts.core import utility
from scripts.core.constants import Direction, NodeType, OverworldState, SceneType
from scripts.scenes.overworld.elements.node import Node
if TYPE_CHECKING:
from typing import Dict, List, Optional, Tuple
from scripts.core.game import Game
__all__ = ["NodeContainer"]
class NodeContainer(ABC):
def __init__(self, game: Game):
self.game: Game = game
self.selected_node: Optional[Node] = None
self.target_node: Optional[Node] = None
self.selection_pos: Tuple[float, float] = (0, 0) # where the selection is drawn
self.boss_pos: Tuple[float, float] = (0, 0)
self.max_travel_time: float = 1.75
self.current_travel_time: float = 0.0
self._wait_time_after_arrival: float = 1.0
self._current_wait_time: float = 0.0
self.is_travel_paused: bool = False
self.is_due_event: bool = False # true if waiting for an event to trigger
self.events_triggered: int = 0 # number of events triggered so far
self.event_notification_timer: float = 0.0
self.show_event_notification: bool = False
@abstractmethod
def render(self, surface: pygame.Surface):
pass
@abstractmethod
def update(self, delta_time: float):
pass
@abstractmethod
def select_next_node(self, direction: Direction):
pass
@abstractmethod
def generate_nodes(self):
pass
def roll_for_event(self):
"""
Roll to see if an event will be triggered when transitioning between nodes.
"""
# check if we have hit the limit of events
if self.events_triggered >= self.game.data.config["overworld"]["max_events_per_level"]:
return
if self.game.rng.roll() < self.game.data.config["overworld"]["chance_of_event"]:
self.is_due_event = True
def _get_node_icon(self, node_type: NodeType) -> pygame.Surface:
"""
Get the node icon from the node type
"""
if node_type == NodeType.COMBAT:
node_icon = self.game.assets.get_image("nodes", "combat")
elif node_type == NodeType.EVENT:
node_icon = self.game.assets.get_image("nodes", "event")
elif node_type == NodeType.INN:
node_icon = self.game.assets.get_image("nodes", "inn")
elif node_type == NodeType.TRAINING:
node_icon = self.game.assets.get_image("nodes", "training")
elif node_type == NodeType.BOSS_COMBAT:
node_icon = self.game.assets.get_image("nodes", "boss_combat")
else:
# node_type == NodeType.BLANK:
node_icon = self.game.assets.get_image("nodes", "blank")
return node_icon
def _get_random_node_type(self) -> NodeType:
"""
Return a random node type
"""
node_weights_dict = self.game.data.config["overworld"]["node_weights"]
node_types = [NodeType.COMBAT, NodeType.INN, NodeType.TRAINING, NodeType.BLANK]
node_weights = []
try:
for enum_ in node_types:
name = enum_.name.lower()
node_weights.append(node_weights_dict[name])
except KeyError as key_error:
logging.warning(f"generate_map: Node key not found in config file. Defaults used. err:{key_error}")
# overwrite with default
node_weights = []
for enum_ in node_types:
node_weights.append(0.1)
node_type = self.game.rng.choices(node_types, node_weights, k=1)[0]
return node_type
def _transition_to_new_node(self, delta_time: float):
"""
Move the selection pos from the selected node to the target node. Will trigger event if one is due. Updates
selected node when complete.
"""
target = self.target_node
selected = self.selected_node
        # ensure we have a target; if not, reset everything.
if target is None:
# update flags
self.is_travel_paused = True
self.current_travel_time = 0
self._current_wait_time = 0
self.game.overworld.state = OverworldState.READY
return
# update timer
self.current_travel_time += delta_time
percent_time_complete = min(1.0, self.current_travel_time / self.max_travel_time)
# update selection position
lerp_amount = pytweening.easeInQuad(percent_time_complete)
x = utility.lerp(selected.pos[0], target.pos[0], lerp_amount)
y = utility.lerp(selected.pos[1], target.pos[1], lerp_amount)
self.selection_pos = (x, y)
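        # halfway through the move, pause travel to trigger any pending event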
if percent_time_complete >= 0.5 and self.is_due_event:
self.is_travel_paused = True
self.is_due_event = False
self.events_triggered += 1
self.event_notification_timer = self.game.data.config["overworld"]["event_notification_duration"]
self.show_event_notification = True
# check if at target pos
elif percent_time_complete >= 1.0:
            # if the node does not trigger on touch, skip the post-arrival wait
if not self.target_node.is_trigger_on_touch:
self._current_wait_time = self._wait_time_after_arrival
# handle wait time
self._current_wait_time += delta_time
if self._current_wait_time >= self._wait_time_after_arrival:
# update flags
self.is_travel_paused = True
self.selected_node = self.target_node
self.target_node = None
self.selection_pos = self.selected_node.pos
self.current_travel_time = 0
self._current_wait_time = 0
# pay travel cost
self.game.overworld.pay_move_cost()
# count down boss timer
self.game.memory.days_until_boss -= 1
# trigger if not already completed and is an auto-triggering type
if self.selected_node.is_trigger_on_touch:
self.trigger_current_node()
def trigger_current_node(self):
"""
Activate the current node and trigger the scene change.
"""
selected_node = self.selected_node
selected_node_type = selected_node.type
logging.info(f"Next node, {selected_node_type.name}, selected.")
# change active scene
scene = None
if selected_node_type == NodeType.COMBAT:
scene = SceneType.COMBAT
elif selected_node_type == NodeType.INN:
scene = SceneType.INN
elif selected_node_type == NodeType.TRAINING:
scene = SceneType.TRAINING
elif selected_node_type == NodeType.EVENT:
scene = SceneType.EVENT
elif selected_node_type == NodeType.BLANK:
scene = SceneType.OVERWORLD
elif selected_node_type == NodeType.BOSS_COMBAT:
scene = SceneType.BOSS_COMBAT
else:
logging.warning(f"Node type ({selected_node_type}) of current node not recognised. No action taken.")
if scene is not None:
selected_node.complete()
# reveal node type
selected_node.reveal_type()
self.game.change_scene(scene)
|
[
"logging.warning",
"logging.info",
"scripts.core.utility.lerp",
"pytweening.easeInQuad"
] |
[((4726, 4770), 'pytweening.easeInQuad', 'pytweening.easeInQuad', (['percent_time_complete'], {}), '(percent_time_complete)\n', (4747, 4770), False, 'import pytweening\n'), ((4784, 4841), 'scripts.core.utility.lerp', 'utility.lerp', (['selected.pos[0]', 'target.pos[0]', 'lerp_amount'], {}), '(selected.pos[0], target.pos[0], lerp_amount)\n', (4796, 4841), False, 'from scripts.core import utility\n'), ((4855, 4912), 'scripts.core.utility.lerp', 'utility.lerp', (['selected.pos[1]', 'target.pos[1]', 'lerp_amount'], {}), '(selected.pos[1], target.pos[1], lerp_amount)\n', (4867, 4912), False, 'from scripts.core import utility\n'), ((6670, 6734), 'logging.info', 'logging.info', (['f"""Next node, {selected_node_type.name}, selected."""'], {}), "(f'Next node, {selected_node_type.name}, selected.')\n", (6682, 6734), False, 'import logging\n'), ((3497, 3606), 'logging.warning', 'logging.warning', (['f"""generate_map: Node key not found in config file. Defaults used. err:{key_error}"""'], {}), "(\n f'generate_map: Node key not found in config file. Defaults used. err:{key_error}'\n )\n", (3512, 3606), False, 'import logging\n'), ((7370, 7481), 'logging.warning', 'logging.warning', (['f"""Node type ({selected_node_type}) of current node not recognised. No action taken."""'], {}), "(\n f'Node type ({selected_node_type}) of current node not recognised. No action taken.'\n )\n", (7385, 7481), False, 'import logging\n')]
|
from unittest.mock import patch
from django.test import TestCase
from tests import path, read
class MappingSheetTestCase(TestCase):
url = '/mapping-sheet/'
def assertSuccess(self, method, expected, data):
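        # Issue the request and check that the response is the expected CSV attachment.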
response = getattr(self.client, method)(self.url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/csv')
self.assertEqual(response['Content-Disposition'], 'attachment; filename="mapping-sheet.csv"')
self.assertEqual(response.content.decode('utf-8').replace('\r\n', '\n'), read(expected))
def assertError(self, data, message, nonfield=None):
response = self.client.post(self.url, data)
content = response.content.decode('utf-8')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
if nonfield:
self.assertIn('<ul class="errorlist nonfield"><li>', content)
else:
self.assertIn('<ul class="errorlist"><li>', content)
self.assertIn(message, content)
@patch('default.forms._get_tags')
def test_get(self, mocked):
mocked.return_value = ('1__0__0', '1__0__1')
response = self.client.get(self.url)
content = response.content.decode('utf-8')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
self.assertIn('<option value="1__0__0">1.0.0</option>', content)
self.assertIn('<option value="1__0__1">1.0.1</option>', content)
def test_get_url(self):
self.assertSuccess('get', 'results/mapping-sheet.csv', {
'source': 'https://standard.open-contracting.org/schema/1__1__4/release-schema.json'
})
def test_post_select(self):
self.assertSuccess('post', 'results/mapping-sheet.csv', {
'type': 'select',
'select_url': 'https://standard.open-contracting.org/schema/1__1__4/release-schema.json',
})
def test_post_url(self):
self.assertSuccess('post', 'results/mapping-sheet.csv', {
'type': 'url',
'custom_url': 'https://standard.open-contracting.org/schema/1__1__4/release-schema.json',
})
def test_post_file(self):
with open(path('schemas/ocds-ppp-1_0_0-release-schema.json')) as f:
self.assertSuccess('post', 'results/ocds-ppp-1_0_0-mapping-sheet.csv', {
'type': 'file',
'custom_file': f,
})
@patch('default.forms._get_tags')
def test_get_extension(self, mocked):
mocked.return_value = ('1__1__3', '1__1__4')
self.assertSuccess('get', 'results/bids-location-mapping-sheet.csv', {
'version': '1__1__4',
'extension': [
'https://raw.githubusercontent.com/open-contracting-extensions/ocds_bid_extension/v1.1.4/extension.json', # noqa
'https://raw.githubusercontent.com/open-contracting-extensions/ocds_location_extension/v1.1.4/extension.json', # noqa
]
})
@patch('default.forms._get_tags')
def test_post_extension(self, mocked):
mocked.return_value = ('1__1__3', '1__1__4')
self.assertSuccess('post', 'results/bids-location-mapping-sheet.csv', {
'type': 'extension',
'version': '1__1__4',
'extension_url_0': 'https://raw.githubusercontent.com/open-contracting-extensions/ocds_bid_extension/v1.1.4/extension.json', # noqa
'extension_url_1': 'https://raw.githubusercontent.com/open-contracting-extensions/ocds_location_extension/v1.1.4/extension.json', # noqa
})
def test_post_empty(self):
self.assertError({}, 'Please choose an operation type')
def test_post_error_select(self):
self.assertError({'type': 'select'}, 'Please select an option')
def test_post_error_url(self):
self.assertError({'type': 'url'}, 'Please provide a URL')
def test_post_error_file(self):
self.assertError({'type': 'file'}, 'Please provide a file')
def test_post_error_extension(self):
self.assertError({'type': 'extension'}, 'Provide at least one extension URL', nonfield=True)
|
[
"unittest.mock.patch",
"tests.path",
"tests.read"
] |
[((1110, 1142), 'unittest.mock.patch', 'patch', (['"""default.forms._get_tags"""'], {}), "('default.forms._get_tags')\n", (1115, 1142), False, 'from unittest.mock import patch\n'), ((2562, 2594), 'unittest.mock.patch', 'patch', (['"""default.forms._get_tags"""'], {}), "('default.forms._get_tags')\n", (2567, 2594), False, 'from unittest.mock import patch\n'), ((3127, 3159), 'unittest.mock.patch', 'patch', (['"""default.forms._get_tags"""'], {}), "('default.forms._get_tags')\n", (3132, 3159), False, 'from unittest.mock import patch\n'), ((581, 595), 'tests.read', 'read', (['expected'], {}), '(expected)\n', (585, 595), False, 'from tests import path, read\n'), ((2332, 2382), 'tests.path', 'path', (['"""schemas/ocds-ppp-1_0_0-release-schema.json"""'], {}), "('schemas/ocds-ppp-1_0_0-release-schema.json')\n", (2336, 2382), False, 'from tests import path, read\n')]
|
"""HACS Startup constrains."""
# pylint: disable=bad-continuation
import os
from .const import CUSTOM_UPDATER_LOCATIONS, CUSTOM_UPDATER_WARNING
from .helpers.misc import version_left_higher_then_right
from custom_components.hacs.globals import get_hacs
MINIMUM_HA_VERSION = "0.110.0"
def check_constrains():
"""Check HACS constrains."""
if not constrain_translations():
return False
if not constrain_custom_updater():
return False
if not constrain_version():
return False
return True
def constrain_custom_updater():
"""Check if custom_updater exist."""
hacs = get_hacs()
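    # Fail the check if any known custom_updater file is still present in the config directory.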
for location in CUSTOM_UPDATER_LOCATIONS:
if os.path.exists(location.format(hacs.system.config_path)):
msg = CUSTOM_UPDATER_WARNING.format(
location.format(hacs.system.config_path)
)
hacs.logger.critical(msg)
return False
return True
def constrain_version():
"""Check if the version is valid."""
hacs = get_hacs()
if not version_left_higher_then_right(hacs.system.ha_version, MINIMUM_HA_VERSION):
hacs.logger.critical(
f"You need HA version {MINIMUM_HA_VERSION} or newer to use this integration."
)
return False
return True
def constrain_translations():
"""Check if traslations exist."""
hacs = get_hacs()
if not os.path.exists(
f"{hacs.system.config_path}/custom_components/hacs/translations"
):
hacs.logger.critical("You are missing the translations directory.")
return False
return True
|
[
"custom_components.hacs.globals.get_hacs",
"os.path.exists"
] |
[((619, 629), 'custom_components.hacs.globals.get_hacs', 'get_hacs', ([], {}), '()\n', (627, 629), False, 'from custom_components.hacs.globals import get_hacs\n'), ((1023, 1033), 'custom_components.hacs.globals.get_hacs', 'get_hacs', ([], {}), '()\n', (1031, 1033), False, 'from custom_components.hacs.globals import get_hacs\n'), ((1369, 1379), 'custom_components.hacs.globals.get_hacs', 'get_hacs', ([], {}), '()\n', (1377, 1379), False, 'from custom_components.hacs.globals import get_hacs\n'), ((1391, 1476), 'os.path.exists', 'os.path.exists', (['f"""{hacs.system.config_path}/custom_components/hacs/translations"""'], {}), "(f'{hacs.system.config_path}/custom_components/hacs/translations'\n )\n", (1405, 1476), False, 'import os\n')]
|
# Generated by Django 3.1.6 on 2022-01-15 10:38
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('scoreboard', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='statistics',
name='last_calculated',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
[
"django.db.models.DateTimeField"
] |
[((370, 425), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (390, 425), False, 'from django.db import migrations, models\n')]
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
# This script aims to replicate the behavior of examples/sir_hmc.py but using
# the high-level components of pyro.contrib.epidemiology. Command line
# arguments and results should be similar.
import argparse
import logging
import math
import torch
from torch.distributions import biject_to, constraints
import pyro
from pyro.contrib.epidemiology import (OverdispersedSEIRModel, OverdispersedSIRModel, SimpleSEIRModel, SimpleSIRModel,
SuperspreadingSEIRModel, SuperspreadingSIRModel)
logging.basicConfig(format='%(message)s', level=logging.INFO)
def Model(args, data):
"""Dispatch between different model classes."""
if args.incubation_time > 0:
assert args.incubation_time > 1
if args.concentration < math.inf:
return SuperspreadingSEIRModel(args.population, args.incubation_time,
args.recovery_time, data)
elif args.overdispersion > 0:
return OverdispersedSEIRModel(args.population, args.incubation_time,
args.recovery_time, data)
else:
return SimpleSEIRModel(args.population, args.incubation_time,
args.recovery_time, data)
else:
if args.concentration < math.inf:
return SuperspreadingSIRModel(args.population, args.recovery_time, data)
elif args.overdispersion > 0:
return OverdispersedSIRModel(args.population, args.recovery_time, data)
else:
return SimpleSIRModel(args.population, args.recovery_time, data)
def generate_data(args):
extended_data = [None] * (args.duration + args.forecast)
model = Model(args, extended_data)
logging.info("Simulating from a {}".format(type(model).__name__))
for attempt in range(100):
samples = model.generate({"R0": args.basic_reproduction_number,
"rho": args.response_rate,
"k": args.concentration,
"od": args.overdispersion})
obs = samples["obs"][:args.duration]
new_I = samples.get("S2I", samples.get("E2I"))
obs_sum = int(obs.sum())
new_I_sum = int(new_I[:args.duration].sum())
assert 0 <= args.min_obs_portion < args.max_obs_portion <= 1
min_obs = int(math.ceil(args.min_obs_portion * args.population))
max_obs = int(math.floor(args.max_obs_portion * args.population))
if min_obs <= obs_sum <= max_obs:
logging.info("Observed {:d}/{:d} infections:\n{}".format(
obs_sum, new_I_sum, " ".join(str(int(x)) for x in obs)))
return {"new_I": new_I, "obs": obs}
if obs_sum < min_obs:
raise ValueError("Failed to generate >={} observations. "
"Try decreasing --min-obs-portion (currently {})."
.format(min_obs, args.min_obs_portion))
else:
raise ValueError("Failed to generate <={} observations. "
"Try increasing --max-obs-portion (currently {})."
.format(max_obs, args.max_obs_portion))
def infer(args, model):
energies = []
def hook_fn(kernel, *unused):
e = float(kernel._potential_energy_last)
energies.append(e)
if args.verbose:
logging.info("potential = {:0.6g}".format(e))
mcmc = model.fit(heuristic_num_particles=args.num_particles,
heuristic_ess_threshold=args.ess_threshold,
warmup_steps=args.warmup_steps,
num_samples=args.num_samples,
max_tree_depth=args.max_tree_depth,
arrowhead_mass=args.arrowhead_mass,
num_quant_bins=args.num_bins,
haar=args.haar,
haar_full_mass=args.haar_full_mass,
hook_fn=hook_fn)
mcmc.summary()
if args.plot:
import matplotlib.pyplot as plt
plt.figure(figsize=(6, 3))
plt.plot(energies)
plt.xlabel("MCMC step")
plt.ylabel("potential energy")
plt.title("MCMC energy trace")
plt.tight_layout()
return model.samples
def evaluate(args, model, samples):
# Print estimated values.
names = {"basic_reproduction_number": "R0",
"response_rate": "rho"}
if args.concentration < math.inf:
names["concentration"] = "k"
if "od" in samples:
names["overdispersion"] = "od"
for name, key in names.items():
mean = samples[key].mean().item()
std = samples[key].std().item()
logging.info("{}: truth = {:0.3g}, estimate = {:0.3g} \u00B1 {:0.3g}"
.format(key, getattr(args, name), mean, std))
# Optionally plot histograms and pairwise correlations.
if args.plot:
import matplotlib.pyplot as plt
import seaborn as sns
# Plot individual histograms.
fig, axes = plt.subplots(len(names), 1, figsize=(5, 2.5 * len(names)))
axes[0].set_title("Posterior parameter estimates")
for ax, (name, key) in zip(axes, names.items()):
truth = getattr(args, name)
sns.distplot(samples[key], ax=ax, label="posterior")
ax.axvline(truth, color="k", label="truth")
ax.set_xlabel(key + " = " + name.replace("_", " "))
ax.set_yticks(())
ax.legend(loc="best")
plt.tight_layout()
# Plot pairwise joint distributions for selected variables.
covariates = [(name, samples[name]) for name in names.values()]
for i, aux in enumerate(samples["auxiliary"].unbind(-2)):
covariates.append(("aux[{},0]".format(i), aux[:, 0]))
covariates.append(("aux[{},-1]".format(i), aux[:, -1]))
N = len(covariates)
fig, axes = plt.subplots(N, N, figsize=(8, 8), sharex="col", sharey="row")
for i in range(N):
axes[i][0].set_ylabel(covariates[i][0])
axes[0][i].set_xlabel(covariates[i][0])
axes[0][i].xaxis.set_label_position("top")
for j in range(N):
ax = axes[i][j]
ax.set_xticks(())
ax.set_yticks(())
ax.scatter(covariates[j][1], -covariates[i][1],
lw=0, color="darkblue", alpha=0.3)
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0)
# Plot Pearson correlation for every pair of unconstrained variables.
def unconstrain(constraint, value):
value = biject_to(constraint).inv(value)
return value.reshape(args.num_samples, -1)
covariates = [
("R1", unconstrain(constraints.positive, samples["R0"])),
("rho", unconstrain(constraints.unit_interval, samples["rho"]))]
if "k" in samples:
covariates.append(
("k", unconstrain(constraints.positive, samples["k"])))
constraint = constraints.interval(-0.5, model.population + 0.5)
for name, aux in zip(model.compartments, samples["auxiliary"].unbind(-2)):
covariates.append((name, unconstrain(constraint, aux)))
x = torch.cat([v for _, v in covariates], dim=-1)
x -= x.mean(0)
x /= x.std(0)
x = x.t().matmul(x)
x /= args.num_samples
x.clamp_(min=-1, max=1)
plt.figure(figsize=(8, 8))
plt.imshow(x, cmap="bwr")
ticks = torch.tensor([0] + [v.size(-1) for _, v in covariates]).cumsum(0)
ticks = (ticks[1:] + ticks[:-1]) / 2
plt.yticks(ticks, [name for name, _ in covariates])
plt.xticks(())
plt.tick_params(length=0)
plt.title("Pearson correlation (unconstrained coordinates)")
plt.tight_layout()
def predict(args, model, truth):
samples = model.predict(forecast=args.forecast)
obs = model.data
new_I = samples.get("S2I", samples.get("E2I"))
median = new_I.median(dim=0).values
logging.info("Median prediction of new infections (starting on day 0):\n{}"
.format(" ".join(map(str, map(int, median)))))
# Optionally plot the latent and forecasted series of new infections.
if args.plot:
import matplotlib.pyplot as plt
plt.figure()
time = torch.arange(args.duration + args.forecast)
p05 = new_I.kthvalue(int(round(0.5 + 0.05 * args.num_samples)), dim=0).values
p95 = new_I.kthvalue(int(round(0.5 + 0.95 * args.num_samples)), dim=0).values
plt.fill_between(time, p05, p95, color="red", alpha=0.3, label="90% CI")
plt.plot(time, median, "r-", label="median")
plt.plot(time[:args.duration], obs, "k.", label="observed")
if truth is not None:
plt.plot(time, truth, "k--", label="truth")
plt.axvline(args.duration - 0.5, color="gray", lw=1)
plt.xlim(0, len(time) - 1)
plt.ylim(0, None)
plt.xlabel("day after first infection")
plt.ylabel("new infections per day")
plt.title("New infections in population of {}".format(args.population))
plt.legend(loc="upper left")
plt.tight_layout()
def main(args):
pyro.enable_validation(__debug__)
pyro.set_rng_seed(args.rng_seed)
# Generate data.
dataset = generate_data(args)
obs = dataset["obs"]
# Run inference.
model = Model(args, obs)
samples = infer(args, model)
# Evaluate fit.
evaluate(args, model, samples)
# Predict latent time series.
if args.forecast:
predict(args, model, truth=dataset["new_I"])
if __name__ == "__main__":
assert pyro.__version__.startswith('1.3.1')
parser = argparse.ArgumentParser(
description="Compartmental epidemiology modeling using HMC")
parser.add_argument("-p", "--population", default=1000, type=float)
parser.add_argument("-m", "--min-obs-portion", default=0.01, type=float)
parser.add_argument("-M", "--max-obs-portion", default=0.99, type=float)
parser.add_argument("-d", "--duration", default=20, type=int)
parser.add_argument("-f", "--forecast", default=10, type=int)
parser.add_argument("-R0", "--basic-reproduction-number", default=1.5, type=float)
parser.add_argument("-tau", "--recovery-time", default=7.0, type=float)
parser.add_argument("-e", "--incubation-time", default=0.0, type=float,
help="If zero, use SIR model; if > 1 use SEIR model.")
parser.add_argument("-k", "--concentration", default=math.inf, type=float,
help="If finite, use a superspreader model.")
parser.add_argument("-rho", "--response-rate", default=0.5, type=float)
parser.add_argument("-o", "--overdispersion", default=0., type=float)
parser.add_argument("--haar", action="store_true")
parser.add_argument("-hfm", "--haar-full-mass", default=0, type=int)
parser.add_argument("-n", "--num-samples", default=200, type=int)
parser.add_argument("-np", "--num-particles", default=1024, type=int)
parser.add_argument("-ess", "--ess-threshold", default=0.5, type=float)
parser.add_argument("-w", "--warmup-steps", default=100, type=int)
parser.add_argument("-t", "--max-tree-depth", default=5, type=int)
parser.add_argument("-a", "--arrowhead-mass", action="store_true")
parser.add_argument("-r", "--rng-seed", default=0, type=int)
parser.add_argument("-nb", "--num-bins", default=4, type=int)
parser.add_argument("--double", action="store_true", default=True)
parser.add_argument("--single", action="store_false", dest="double")
parser.add_argument("--cuda", action="store_true")
parser.add_argument("--verbose", action="store_true")
parser.add_argument("--plot", action="store_true")
args = parser.parse_args()
args.population = int(args.population) # to allow e.g. --population=1e6
if args.double:
if args.cuda:
torch.set_default_tensor_type(torch.cuda.DoubleTensor)
else:
torch.set_default_dtype(torch.float64)
elif args.cuda:
torch.set_default_tensor_type(torch.cuda.FloatTensor)
main(args)
if args.plot:
import matplotlib.pyplot as plt
plt.show()
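# Example invocation (a sketch, not from the original source): the script name below is a
# placeholder, and the flags correspond to the argparse options defined above.
#   python epi_sir_hmc.py --population 10000 --duration 30 --forecast 10 \
#       --num-samples 200 --warmup-steps 100 --plot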
|
[
"matplotlib.pyplot.title",
"pyro.enable_validation",
"pyro.contrib.epidemiology.SimpleSEIRModel",
"argparse.ArgumentParser",
"torch.cat",
"torch.set_default_tensor_type",
"matplotlib.pyplot.figure",
"torch.distributions.constraints.interval",
"torch.set_default_dtype",
"torch.arange",
"matplotlib.pyplot.tick_params",
"pyro.contrib.epidemiology.SimpleSIRModel",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.tight_layout",
"pyro.contrib.epidemiology.SuperspreadingSEIRModel",
"torch.distributions.biject_to",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"pyro.contrib.epidemiology.OverdispersedSEIRModel",
"matplotlib.pyplot.ylim",
"math.ceil",
"matplotlib.pyplot.legend",
"pyro.contrib.epidemiology.SuperspreadingSIRModel",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"pyro.contrib.epidemiology.OverdispersedSIRModel",
"logging.basicConfig",
"matplotlib.pyplot.plot",
"math.floor",
"pyro.__version__.startswith",
"pyro.set_rng_seed",
"seaborn.distplot",
"matplotlib.pyplot.xlabel"
] |
[((611, 672), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(message)s"""', 'level': 'logging.INFO'}), "(format='%(message)s', level=logging.INFO)\n", (630, 672), False, 'import logging\n'), ((9323, 9356), 'pyro.enable_validation', 'pyro.enable_validation', (['__debug__'], {}), '(__debug__)\n', (9345, 9356), False, 'import pyro\n'), ((9361, 9393), 'pyro.set_rng_seed', 'pyro.set_rng_seed', (['args.rng_seed'], {}), '(args.rng_seed)\n', (9378, 9393), False, 'import pyro\n'), ((9765, 9801), 'pyro.__version__.startswith', 'pyro.__version__.startswith', (['"""1.3.1"""'], {}), "('1.3.1')\n", (9792, 9801), False, 'import pyro\n'), ((9815, 9904), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compartmental epidemiology modeling using HMC"""'}), "(description=\n 'Compartmental epidemiology modeling using HMC')\n", (9838, 9904), False, 'import argparse\n'), ((4127, 4153), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 3)'}), '(figsize=(6, 3))\n', (4137, 4153), True, 'import matplotlib.pyplot as plt\n'), ((4162, 4180), 'matplotlib.pyplot.plot', 'plt.plot', (['energies'], {}), '(energies)\n', (4170, 4180), True, 'import matplotlib.pyplot as plt\n'), ((4189, 4212), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""MCMC step"""'], {}), "('MCMC step')\n", (4199, 4212), True, 'import matplotlib.pyplot as plt\n'), ((4221, 4251), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""potential energy"""'], {}), "('potential energy')\n", (4231, 4251), True, 'import matplotlib.pyplot as plt\n'), ((4260, 4290), 'matplotlib.pyplot.title', 'plt.title', (['"""MCMC energy trace"""'], {}), "('MCMC energy trace')\n", (4269, 4290), True, 'import matplotlib.pyplot as plt\n'), ((4299, 4317), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4315, 4317), True, 'import matplotlib.pyplot as plt\n'), ((5578, 5596), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5594, 5596), True, 'import matplotlib.pyplot as plt\n'), ((5986, 6048), 'matplotlib.pyplot.subplots', 'plt.subplots', (['N', 'N'], {'figsize': '(8, 8)', 'sharex': '"""col"""', 'sharey': '"""row"""'}), "(N, N, figsize=(8, 8), sharex='col', sharey='row')\n", (5998, 6048), True, 'import matplotlib.pyplot as plt\n'), ((6500, 6518), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6516, 6518), True, 'import matplotlib.pyplot as plt\n'), ((6527, 6566), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0)', 'hspace': '(0)'}), '(wspace=0, hspace=0)\n', (6546, 6566), True, 'import matplotlib.pyplot as plt\n'), ((7120, 7170), 'torch.distributions.constraints.interval', 'constraints.interval', (['(-0.5)', '(model.population + 0.5)'], {}), '(-0.5, model.population + 0.5)\n', (7140, 7170), False, 'from torch.distributions import biject_to, constraints\n'), ((7334, 7379), 'torch.cat', 'torch.cat', (['[v for _, v in covariates]'], {'dim': '(-1)'}), '([v for _, v in covariates], dim=-1)\n', (7343, 7379), False, 'import torch\n'), ((7523, 7549), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (7533, 7549), True, 'import matplotlib.pyplot as plt\n'), ((7558, 7583), 'matplotlib.pyplot.imshow', 'plt.imshow', (['x'], {'cmap': '"""bwr"""'}), "(x, cmap='bwr')\n", (7568, 7583), True, 'import matplotlib.pyplot as plt\n'), ((7719, 7770), 'matplotlib.pyplot.yticks', 'plt.yticks', (['ticks', '[name for name, _ in covariates]'], {}), '(ticks, [name for name, _ in covariates])\n', (7729, 
7770), True, 'import matplotlib.pyplot as plt\n'), ((7779, 7793), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (7789, 7793), True, 'import matplotlib.pyplot as plt\n'), ((7802, 7827), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'length': '(0)'}), '(length=0)\n', (7817, 7827), True, 'import matplotlib.pyplot as plt\n'), ((7836, 7896), 'matplotlib.pyplot.title', 'plt.title', (['"""Pearson correlation (unconstrained coordinates)"""'], {}), "('Pearson correlation (unconstrained coordinates)')\n", (7845, 7896), True, 'import matplotlib.pyplot as plt\n'), ((7905, 7923), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7921, 7923), True, 'import matplotlib.pyplot as plt\n'), ((8410, 8422), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8420, 8422), True, 'import matplotlib.pyplot as plt\n'), ((8438, 8481), 'torch.arange', 'torch.arange', (['(args.duration + args.forecast)'], {}), '(args.duration + args.forecast)\n', (8450, 8481), False, 'import torch\n'), ((8662, 8734), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['time', 'p05', 'p95'], {'color': '"""red"""', 'alpha': '(0.3)', 'label': '"""90% CI"""'}), "(time, p05, p95, color='red', alpha=0.3, label='90% CI')\n", (8678, 8734), True, 'import matplotlib.pyplot as plt\n'), ((8743, 8787), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'median', '"""r-"""'], {'label': '"""median"""'}), "(time, median, 'r-', label='median')\n", (8751, 8787), True, 'import matplotlib.pyplot as plt\n'), ((8796, 8855), 'matplotlib.pyplot.plot', 'plt.plot', (['time[:args.duration]', 'obs', '"""k."""'], {'label': '"""observed"""'}), "(time[:args.duration], obs, 'k.', label='observed')\n", (8804, 8855), True, 'import matplotlib.pyplot as plt\n'), ((8950, 9002), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(args.duration - 0.5)'], {'color': '"""gray"""', 'lw': '(1)'}), "(args.duration - 0.5, color='gray', lw=1)\n", (8961, 9002), True, 'import matplotlib.pyplot as plt\n'), ((9046, 9063), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', 'None'], {}), '(0, None)\n', (9054, 9063), True, 'import matplotlib.pyplot as plt\n'), ((9072, 9111), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""day after first infection"""'], {}), "('day after first infection')\n", (9082, 9111), True, 'import matplotlib.pyplot as plt\n'), ((9120, 9156), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""new infections per day"""'], {}), "('new infections per day')\n", (9130, 9156), True, 'import matplotlib.pyplot as plt\n'), ((9245, 9273), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (9255, 9273), True, 'import matplotlib.pyplot as plt\n'), ((9282, 9300), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9298, 9300), True, 'import matplotlib.pyplot as plt\n'), ((12336, 12346), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12344, 12346), True, 'import matplotlib.pyplot as plt\n'), ((884, 977), 'pyro.contrib.epidemiology.SuperspreadingSEIRModel', 'SuperspreadingSEIRModel', (['args.population', 'args.incubation_time', 'args.recovery_time', 'data'], {}), '(args.population, args.incubation_time, args.\n recovery_time, data)\n', (907, 977), False, 'from pyro.contrib.epidemiology import OverdispersedSEIRModel, OverdispersedSIRModel, SimpleSEIRModel, SimpleSIRModel, SuperspreadingSEIRModel, SuperspreadingSIRModel\n'), ((1423, 1488), 'pyro.contrib.epidemiology.SuperspreadingSIRModel', 'SuperspreadingSIRModel', (['args.population', 
'args.recovery_time', 'data'], {}), '(args.population, args.recovery_time, data)\n', (1445, 1488), False, 'from pyro.contrib.epidemiology import OverdispersedSEIRModel, OverdispersedSIRModel, SimpleSEIRModel, SimpleSIRModel, SuperspreadingSEIRModel, SuperspreadingSIRModel\n'), ((2462, 2511), 'math.ceil', 'math.ceil', (['(args.min_obs_portion * args.population)'], {}), '(args.min_obs_portion * args.population)\n', (2471, 2511), False, 'import math\n'), ((2535, 2585), 'math.floor', 'math.floor', (['(args.max_obs_portion * args.population)'], {}), '(args.max_obs_portion * args.population)\n', (2545, 2585), False, 'import math\n'), ((5333, 5385), 'seaborn.distplot', 'sns.distplot', (['samples[key]'], {'ax': 'ax', 'label': '"""posterior"""'}), "(samples[key], ax=ax, label='posterior')\n", (5345, 5385), True, 'import seaborn as sns\n'), ((8898, 8941), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'truth', '"""k--"""'], {'label': '"""truth"""'}), "(time, truth, 'k--', label='truth')\n", (8906, 8941), True, 'import matplotlib.pyplot as plt\n'), ((12051, 12105), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.cuda.DoubleTensor'], {}), '(torch.cuda.DoubleTensor)\n', (12080, 12105), False, 'import torch\n'), ((12132, 12170), 'torch.set_default_dtype', 'torch.set_default_dtype', (['torch.float64'], {}), '(torch.float64)\n', (12155, 12170), False, 'import torch\n'), ((12199, 12252), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.cuda.FloatTensor'], {}), '(torch.cuda.FloatTensor)\n', (12228, 12252), False, 'import torch\n'), ((1073, 1165), 'pyro.contrib.epidemiology.OverdispersedSEIRModel', 'OverdispersedSEIRModel', (['args.population', 'args.incubation_time', 'args.recovery_time', 'data'], {}), '(args.population, args.incubation_time, args.\n recovery_time, data)\n', (1095, 1165), False, 'from pyro.contrib.epidemiology import OverdispersedSEIRModel, OverdispersedSIRModel, SimpleSEIRModel, SimpleSIRModel, SuperspreadingSEIRModel, SuperspreadingSIRModel\n'), ((1236, 1321), 'pyro.contrib.epidemiology.SimpleSEIRModel', 'SimpleSEIRModel', (['args.population', 'args.incubation_time', 'args.recovery_time', 'data'], {}), '(args.population, args.incubation_time, args.recovery_time, data\n )\n', (1251, 1321), False, 'from pyro.contrib.epidemiology import OverdispersedSEIRModel, OverdispersedSIRModel, SimpleSEIRModel, SimpleSIRModel, SuperspreadingSEIRModel, SuperspreadingSIRModel\n'), ((1546, 1610), 'pyro.contrib.epidemiology.OverdispersedSIRModel', 'OverdispersedSIRModel', (['args.population', 'args.recovery_time', 'data'], {}), '(args.population, args.recovery_time, data)\n', (1567, 1610), False, 'from pyro.contrib.epidemiology import OverdispersedSEIRModel, OverdispersedSIRModel, SimpleSEIRModel, SimpleSIRModel, SuperspreadingSEIRModel, SuperspreadingSIRModel\n'), ((1644, 1701), 'pyro.contrib.epidemiology.SimpleSIRModel', 'SimpleSIRModel', (['args.population', 'args.recovery_time', 'data'], {}), '(args.population, args.recovery_time, data)\n', (1658, 1701), False, 'from pyro.contrib.epidemiology import OverdispersedSEIRModel, OverdispersedSIRModel, SimpleSEIRModel, SimpleSIRModel, SuperspreadingSEIRModel, SuperspreadingSIRModel\n'), ((6710, 6731), 'torch.distributions.biject_to', 'biject_to', (['constraint'], {}), '(constraint)\n', (6719, 6731), False, 'from torch.distributions import biject_to, constraints\n')]
|
""" Database interaction class. """
__author__ = "<NAME>"
__date__ = "2020-08-06"
__copyright__ = "Copyright 2020 United Kingdom Research and Innovation"
__license__ = "BSD - see LICENSE file in top-level package directory"
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
CONNECTION_TEMPLATE = \
"postgresql+pg8000://{user}:{password}@{host}:{port}/{database}"
class Connection:
def __init__(self, **kwargs):
self._engine = create_engine(
CONNECTION_TEMPLATE.format(**kwargs),
isolation_level="READ UNCOMMITTED"
)
def __enter__(self):
Session = sessionmaker(bind=self._engine)
self._session = Session()
return self
def __exit__(self, *args):
self._session.commit()
self._session.close()
def load_users(self, user_model):
return self._session.query(user_model).all()
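# Usage sketch (an assumption, not part of the original module): `UserModel` stands in for
# any mapped SQLAlchemy model class, and the keyword arguments fill CONNECTION_TEMPLATE above.
# with Connection(user="reader", password="secret", host="localhost",
#                 port=5432, database="users") as conn:
#     users = conn.load_users(UserModel)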
|
[
"sqlalchemy.orm.sessionmaker"
] |
[((645, 676), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'self._engine'}), '(bind=self._engine)\n', (657, 676), False, 'from sqlalchemy.orm import sessionmaker\n')]
|
"""Test the K8SDescriptor wrapper."""
import pytest
from kubesplit.k8s_descriptor import K8SDescriptor
data_for_test_get_order_prefix = [
("Namespace"),
("ServiceAccount"),
("ClusterRole"),
("Role"),
("ClusterRoleBinding"),
("RoleBinding"),
("Deployment"),
("Service"),
("Ingress"),
("StatefulSet"),
]
def test_has_namespace_when_true():
"""test_has_namespace_when_true."""
k8s_default_svc_dummy = K8SDescriptor(
name="dummy", kind="Service", namespace="default", as_yaml=""
)
assert k8s_default_svc_dummy.has_namespace()
def test_has_namespace_when_false():
"""test_has_namespace_when_false."""
k8s_default_svc_dummy = K8SDescriptor(
name="dummy", kind="Service", namespace=None, as_yaml=""
)
assert not k8s_default_svc_dummy.has_namespace()
def test_compute_namespace_dirname_when_has_namespace():
"""test_compute_namespace_dirname_when_has_namespace."""
k8s_default_svc_dummy = K8SDescriptor(
name="dummy", kind="Service", namespace="default", as_yaml=""
)
assert k8s_default_svc_dummy.compute_namespace_dirname() == "default"
def test_compute_namespace_dirname_when_no_namespace():
"""test_compute_namespace_dirname_when_no_namespace."""
k8s_default_svc_dummy = K8SDescriptor(
name="dummy", kind="Service", namespace=None, as_yaml=""
)
assert k8s_default_svc_dummy.compute_namespace_dirname() is None
@pytest.mark.parametrize("kind", data_for_test_get_order_prefix)
def test_get_order_prefix(kind: str):
"""test_get_order_prefix."""
sut = K8SDescriptor(
name="for_test",
kind=kind,
namespace=None,
as_yaml=None,
use_order_prefix=True,
)
assert (sut.get_order_prefix() == "") is False
@pytest.mark.parametrize("kind", data_for_test_get_order_prefix)
def test_get_order_prefix_when_disabled(kind: str):
"""test_get_order_prefix_when_disabled."""
sut = K8SDescriptor(
name="for_test",
kind=kind,
namespace=None,
as_yaml=None,
use_order_prefix=False,
)
assert (sut.get_order_prefix() == "") is True
def test_is_list_with_default():
"""Test is_list with default."""
sut = K8SDescriptor(
name="for_test",
kind="ConfigMap",
namespace=None,
as_yaml=None,
use_order_prefix=False,
)
assert sut.is_list is False
def test_is_list_can_be_set():
"""Test is_list with default."""
sut = K8SDescriptor(
name="for_test",
kind="ConfigMapList",
namespace=None,
as_yaml=None,
use_order_prefix=False,
)
sut.is_list = True
assert sut.is_list is True
|
[
"pytest.mark.parametrize",
"kubesplit.k8s_descriptor.K8SDescriptor"
] |
[((1453, 1516), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kind"""', 'data_for_test_get_order_prefix'], {}), "('kind', data_for_test_get_order_prefix)\n", (1476, 1516), False, 'import pytest\n'), ((1794, 1857), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kind"""', 'data_for_test_get_order_prefix'], {}), "('kind', data_for_test_get_order_prefix)\n", (1817, 1857), False, 'import pytest\n'), ((449, 525), 'kubesplit.k8s_descriptor.K8SDescriptor', 'K8SDescriptor', ([], {'name': '"""dummy"""', 'kind': '"""Service"""', 'namespace': '"""default"""', 'as_yaml': '""""""'}), "(name='dummy', kind='Service', namespace='default', as_yaml='')\n", (462, 525), False, 'from kubesplit.k8s_descriptor import K8SDescriptor\n'), ((697, 768), 'kubesplit.k8s_descriptor.K8SDescriptor', 'K8SDescriptor', ([], {'name': '"""dummy"""', 'kind': '"""Service"""', 'namespace': 'None', 'as_yaml': '""""""'}), "(name='dummy', kind='Service', namespace=None, as_yaml='')\n", (710, 768), False, 'from kubesplit.k8s_descriptor import K8SDescriptor\n'), ((984, 1060), 'kubesplit.k8s_descriptor.K8SDescriptor', 'K8SDescriptor', ([], {'name': '"""dummy"""', 'kind': '"""Service"""', 'namespace': '"""default"""', 'as_yaml': '""""""'}), "(name='dummy', kind='Service', namespace='default', as_yaml='')\n", (997, 1060), False, 'from kubesplit.k8s_descriptor import K8SDescriptor\n'), ((1295, 1366), 'kubesplit.k8s_descriptor.K8SDescriptor', 'K8SDescriptor', ([], {'name': '"""dummy"""', 'kind': '"""Service"""', 'namespace': 'None', 'as_yaml': '""""""'}), "(name='dummy', kind='Service', namespace=None, as_yaml='')\n", (1308, 1366), False, 'from kubesplit.k8s_descriptor import K8SDescriptor\n'), ((1598, 1696), 'kubesplit.k8s_descriptor.K8SDescriptor', 'K8SDescriptor', ([], {'name': '"""for_test"""', 'kind': 'kind', 'namespace': 'None', 'as_yaml': 'None', 'use_order_prefix': '(True)'}), "(name='for_test', kind=kind, namespace=None, as_yaml=None,\n use_order_prefix=True)\n", (1611, 1696), False, 'from kubesplit.k8s_descriptor import K8SDescriptor\n'), ((1967, 2066), 'kubesplit.k8s_descriptor.K8SDescriptor', 'K8SDescriptor', ([], {'name': '"""for_test"""', 'kind': 'kind', 'namespace': 'None', 'as_yaml': 'None', 'use_order_prefix': '(False)'}), "(name='for_test', kind=kind, namespace=None, as_yaml=None,\n use_order_prefix=False)\n", (1980, 2066), False, 'from kubesplit.k8s_descriptor import K8SDescriptor\n'), ((2242, 2349), 'kubesplit.k8s_descriptor.K8SDescriptor', 'K8SDescriptor', ([], {'name': '"""for_test"""', 'kind': '"""ConfigMap"""', 'namespace': 'None', 'as_yaml': 'None', 'use_order_prefix': '(False)'}), "(name='for_test', kind='ConfigMap', namespace=None, as_yaml=\n None, use_order_prefix=False)\n", (2255, 2349), False, 'from kubesplit.k8s_descriptor import K8SDescriptor\n'), ((2504, 2614), 'kubesplit.k8s_descriptor.K8SDescriptor', 'K8SDescriptor', ([], {'name': '"""for_test"""', 'kind': '"""ConfigMapList"""', 'namespace': 'None', 'as_yaml': 'None', 'use_order_prefix': '(False)'}), "(name='for_test', kind='ConfigMapList', namespace=None,\n as_yaml=None, use_order_prefix=False)\n", (2517, 2614), False, 'from kubesplit.k8s_descriptor import K8SDescriptor\n')]
|
#$Id$
from books.model.PageContext import PageContext
class CurrencyList:
"""This class is used to create object for currency list."""
def __init__(self):
"""Initialize parameters for currency list."""
self.currencies = []
self.page_context = PageContext()
def set_currencies(self, currency):
"""Set currencies.
Args:
currency(instance): Currency object.
"""
self.currencies.append(currency)
def get_currencies(self):
"""Get currencies.
Returns:
list of instance: List of currency object.
"""
return self.currencies
def set_page_context(self, page_context):
"""Set page context.
Args:
page_context(instance): Page context object.
"""
self.page_context = page_context
def get_page_context(self):
"""Get page context.
Returns:
instance: Page context object.
"""
return self.page_context
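# Usage sketch (assumed caller code, not part of the original class): `currency` is a
# placeholder for whatever Currency instance the books API deserializes.
# currency_list = CurrencyList()
# currency_list.set_currencies(currency)        # append one Currency instance
# currencies = currency_list.get_currencies()   # list of Currency instances
# page_context = currency_list.get_page_context()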
|
[
"books.model.PageContext.PageContext"
] |
[((277, 290), 'books.model.PageContext.PageContext', 'PageContext', ([], {}), '()\n', (288, 290), False, 'from books.model.PageContext import PageContext\n')]
|
#! /usr/bin/env python
# -*- coding: utf8 -*-
'''
Copyright 2018 University of Liège
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
interfaceData.py
Matrix and vector representation of interface data.
Authors : <NAME>, <NAME>, <NAME>
'''
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import numpy as np
import scipy as sp
import sys
import ccupydo
np.set_printoptions(threshold=sys.maxsize)
# ----------------------------------------------------------------------
# FlexInterfaceData class
# ----------------------------------------------------------------------
class FlexInterfaceData(ccupydo.CFlexInterfaceData):
"""
Description
"""
def __init__(self, val_nPoint, val_nDim, mpiComm=None):
"""
Des.
"""
self.mpiComm = mpiComm
ccupydo.CFlexInterfaceData.__init__(self, val_nPoint, val_nDim, mpiComm)
#self.mpiComm = mpiComm
#self.nPoint = val_nPoint
#self.nDim = val_nDim
#self.dataContainer = []
#if mpiComm != None:
# for iDim in range(self.nDim):
# from petsc4py import PETSc
# data = PETSc.Vec().create(self.mpiComm)
# data.setType('mpi')
# data.setSizes(self.nPoint)
# data.set(0.0)
# self.dataContainer.append(data)
# self.myid = self.mpiComm.Get_rank()
# self.mpiSize = self.mpiComm.Get_size()
# startIndex , stopIndex = self.dataContainer[0].getOwnershipRange()
# #!!! stopIndex is 1 more than the true index !!!
# #startIndex , stopIndex = self.getOwnershipRange()
# self.indexing = self.mpiComm.allgather((startIndex , stopIndex))
#else:
# for iDim in range(self.nDim):
# data = np.zeros(self.nPoint, dtype=float)
# self.dataContainer.append(data)
# self.myid = 0
# self.mpiSize = 1
def __setitem__(self, index, values):
"""
Des.
"""
if type(values) != list:
raise TypeError("FlexInterfaceData.__setitem__ needs list as argument !")
if len(values) != self.nDim:
raise IndexError("Length of values does not match nDim !")
else:
for iDim in range(self.nDim):
self.setValue(iDim, index, values[iDim])
def __add__(self, dataToAdd):
"""
Des.
"""
if type(dataToAdd) == type(self):
if self.nDim != dataToAdd.nDim:
raise IndexError("Dimensions do not match for + operator !")
if self.nPoint != dataToAdd.nPoint:
raise IndexError("Lengthes do not match for + operator !")
newData = FlexInterfaceData(self.nPoint, self.nDim, self.comm)
self.copy(newData)
newData.add(dataToAdd)
return newData
def __radd__(self, dataToAdd):
"""
Des.
"""
newData = self + dataToAdd
return newData
def __iadd__(self, dataToAdd):
"""
Des.
"""
if type(dataToAdd) == type(self):
if self.nDim != dataToAdd.nDim:
raise IndexError("Dimensions do not match for + operator !")
if self.nPoint != dataToAdd.nPoint:
raise IndexError("Lengthes do not match for + operator !")
self.add(dataToAdd)
return self
def __sub__(self, dataToSub):
"""
Des.
"""
if type(dataToSub) == type(self):
if self.nDim != dataToSub.nDim:
raise IndexError("Dimensions do not match for + operator !")
if self.nPoint != dataToSub.nPoint:
raise IndexError("Lengthes do not match for + operator !")
newData = FlexInterfaceData(self.nPoint, self.nDim, self.comm)
self.copy(newData)
newData.sub(dataToSub)
return newData
def __rsub__(self, dataToSub):
"""
Des.
"""
if type(dataToSub) == type(self):
if self.nDim != dataToSub.nDim:
raise IndexError("Dimensions do not match for + operator !")
if self.nPoint != dataToSub.nPoint:
raise IndexError("Lengthes do not match for + operator !")
newData = -1*self + dataToSub
return newData
def __isub__(self, dataToSub):
"""
Des.
"""
if type(dataToSub) == type(self):
if self.nDim != dataToSub.nDim:
raise IndexError("Dimensions do not match for + operator !")
if self.nPoint != dataToSub.nPoint:
raise IndexError("Lengthes do not match for + operator !")
self.sub(dataToSub)
return self
def __mul__(self, mulVal):
"""
Des.
"""
newData = FlexInterfaceData(self.nPoint, self.nDim, self.comm)
self.copy(newData)
newData.scale(mulVal)
return newData
def __rmul__(self, mulVal):
"""
Des
"""
newData = self*mulVal
return newData
def __imul__(self, mulVal):
"""
Des.
"""
self.scale(mulVal)
return self
def dot(self, dataToDot):
dotList = []
if self.mpiComm != None:
dotList = ccupydo.CFlexInterfaceData.dot(self, dataToDot)
else:
for iDim in range(self.nDim):
myData = self.getData(iDim)
dotData = dataToDot.getData(iDim)
val_dot = myData.dot(dotData)
dotList.append(val_dot)
return dotList
def sum(self):
sumList = []
if self.mpiComm != None:
sumList = ccupydo.CFlexInterfaceData.sum(self)
else:
for iDim in range(self.nDim):
myData = self.getData(iDim)
val_sum = myData.sum()
sumList.append(val_sum)
return sumList
def norm(self):
normList = []
if self.mpiComm != None:
normList = ccupydo.CFlexInterfaceData.norm(self)
else:
for iDim in range(self.nDim):
myData = self.getData(iDim)
val_norm = np.linalg.norm(myData)
normList.append(val_norm)
return normList
# ----------------------------------------------------------------------
# InterfaceMatrix class
# ----------------------------------------------------------------------
class InterfaceMatrix(ccupydo.CInterfaceMatrix):
"""
Define a matrix based on fluid-structure interface data.
Designed for parallel computations (also works in serial).
Inherited public members :
-createDense()
-createSparse()
-createSparseFullAlloc()
-setValue()
-assemble()
-getMat()
"""
def __init__(self, sizes, mpiComm=None):
"""
Overloaded constructor
"""
ccupydo.CInterfaceMatrix.__init__(self, sizes[0],sizes[1])
self.sizes = sizes
self.mpiComm = mpiComm
def mult(self, Data , DataOut):
"""
Performs interface matrix-data multiplication.
"""
if self.mpiComm != None:
ccupydo.CInterfaceMatrix.mult(self, Data, DataOut)
else:
            PyH = self.getMat()
dim = Data.getDim()
for iDim in range(dim):
np.dot(PyH, Data.getData(iDim), DataOut.getData(iDim))
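# Serial usage sketch (an assumption; requires the compiled ccupydo extension): with
# mpiComm=None each dimension behaves like a plain NumPy vector, so dot/sum/norm fall
# back to the serial branches above. `nPoint` is a placeholder for the interface size.
# disp = FlexInterfaceData(nPoint, 3, mpiComm=None)   # 3 components per interface point
# disp[0] = [0.1, 0.0, 0.0]                           # __setitem__ expects a list of length nDim
# norms = disp.norm()                                 # per-dimension norms (serial path uses numpy)
# dots = disp.dot(disp)                               # per-dimension dot products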
|
[
"ccupydo.CFlexInterfaceData.dot",
"ccupydo.CInterfaceMatrix.mult",
"numpy.set_printoptions",
"ccupydo.CFlexInterfaceData.__init__",
"ccupydo.CInterfaceMatrix.__init__",
"numpy.linalg.norm",
"ccupydo.CFlexInterfaceData.norm",
"ccupydo.CFlexInterfaceData.sum"
] |
[((947, 989), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (966, 989), True, 'import numpy as np\n'), ((1397, 1469), 'ccupydo.CFlexInterfaceData.__init__', 'ccupydo.CFlexInterfaceData.__init__', (['self', 'val_nPoint', 'val_nDim', 'mpiComm'], {}), '(self, val_nPoint, val_nDim, mpiComm)\n', (1432, 1469), False, 'import ccupydo\n'), ((7623, 7682), 'ccupydo.CInterfaceMatrix.__init__', 'ccupydo.CInterfaceMatrix.__init__', (['self', 'sizes[0]', 'sizes[1]'], {}), '(self, sizes[0], sizes[1])\n', (7656, 7682), False, 'import ccupydo\n'), ((5954, 6001), 'ccupydo.CFlexInterfaceData.dot', 'ccupydo.CFlexInterfaceData.dot', (['self', 'dataToDot'], {}), '(self, dataToDot)\n', (5984, 6001), False, 'import ccupydo\n'), ((6372, 6408), 'ccupydo.CFlexInterfaceData.sum', 'ccupydo.CFlexInterfaceData.sum', (['self'], {}), '(self)\n', (6402, 6408), False, 'import ccupydo\n'), ((6725, 6762), 'ccupydo.CFlexInterfaceData.norm', 'ccupydo.CFlexInterfaceData.norm', (['self'], {}), '(self)\n', (6756, 6762), False, 'import ccupydo\n'), ((7903, 7953), 'ccupydo.CInterfaceMatrix.mult', 'ccupydo.CInterfaceMatrix.mult', (['self', 'Data', 'DataOut'], {}), '(self, Data, DataOut)\n', (7932, 7953), False, 'import ccupydo\n'), ((6890, 6912), 'numpy.linalg.norm', 'np.linalg.norm', (['myData'], {}), '(myData)\n', (6904, 6912), True, 'import numpy as np\n')]
|
from celery import shared_task
from time import sleep
from django.core.mail import send_mail
from django.conf import settings
@shared_task
def sleepy(duration):
sleep(duration)
print("I'm busy for background working...")
return None
@shared_task
def email_send_our_user():
send_mail(
'Testing celery working...',
'celery is very awesome asyc task...',
settings.EMAIL_HOST_USER,
['<EMAIL>'],
fail_silently=False,
)
print("I'm busy for mail sending...")
return None
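# Usage sketch (assumed caller code): shared tasks are normally queued through Celery's
# .delay()/.apply_async() helpers rather than called directly.
# sleepy.delay(10)                                # run sleepy(10) on a worker
# email_send_our_user.apply_async(countdown=60)   # send the test mail after 60 s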
|
[
"django.core.mail.send_mail",
"time.sleep"
] |
[((167, 182), 'time.sleep', 'sleep', (['duration'], {}), '(duration)\n', (172, 182), False, 'from time import sleep\n'), ((293, 439), 'django.core.mail.send_mail', 'send_mail', (['"""Testing celery working..."""', '"""celery is very awesome asyc task..."""', 'settings.EMAIL_HOST_USER', "['<EMAIL>']"], {'fail_silently': '(False)'}), "('Testing celery working...',\n 'celery is very awesome asyc task...', settings.EMAIL_HOST_USER, [\n '<EMAIL>'], fail_silently=False)\n", (302, 439), False, 'from django.core.mail import send_mail\n')]
|
from kafka import KafkaConsumer
from pymongo import MongoClient
from json import loads
if __name__ == "__main__":
# initialize consumer object
consumer = KafkaConsumer('raw_data',
bootstrap_servers=['localhost:9092'],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id='my-group',
value_deserializer=lambda x: loads(x.decode('utf-8')))
# start mongo client
client = MongoClient('localhost:27017')
# connect to the collection
collection = client.data.raw
# loop over all consumer messages to save them into the DB
for message in consumer:
message = message.value
collection.insert_one(message)
print('{} added to {}'.format(message, collection))
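# Producer-side sketch (an assumption, not part of this consumer script): messages on the
# 'raw_data' topic are expected to be UTF-8 encoded JSON, mirroring the deserializer above.
# from kafka import KafkaProducer
# from json import dumps
# producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
#                          value_serializer=lambda x: dumps(x).encode('utf-8'))
# producer.send('raw_data', value={'sensor': 1, 'reading': 42})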
|
[
"pymongo.MongoClient"
] |
[((542, 572), 'pymongo.MongoClient', 'MongoClient', (['"""localhost:27017"""'], {}), "('localhost:27017')\n", (553, 572), False, 'from pymongo import MongoClient\n')]
|
from __future__ import division
from future.utils import iteritems, itervalues
from builtins import map, zip
import numpy as np
import itertools
import collections
import operator
import copy
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec
from matplotlib import cm
from warnings import warn
from scipy.special import logsumexp
from pyhsmm.basic.abstractions import Model, ModelGibbsSampling, \
ModelEM, ModelMAPEM, ModelMeanField, ModelMeanFieldSVI, ModelParallelTempering
from pyhsmm.internals import hmm_states, hsmm_states, hsmm_inb_states, \
initial_state, transitions
from pyhsmm.util.general import list_split
from pyhsmm.util.profiling import line_profiled
from pybasicbayes.util.stats import atleast_2d
from pybasicbayes.distributions.gaussian import Gaussian
from pyhsmm.util.plot import annotate_heatmap, heatmap
################
# HMM Mixins #
################
class _HMMBase(Model):
_states_class = hmm_states.HMMStatesPython
_trans_class = transitions.HMMTransitions
_trans_conc_class = transitions.HMMTransitionsConc
_init_state_class = initial_state.HMMInitialState
def __init__(self,
obs_distns,
trans_distn=None,
alpha=None,alpha_a_0=None,alpha_b_0=None,trans_matrix=None,
init_state_distn=None,init_state_concentration=None,pi_0=None):
self.obs_distns = obs_distns
self.states_list = []
if trans_distn is not None:
self.trans_distn = trans_distn
elif not None in (alpha_a_0,alpha_b_0):
self.trans_distn = self._trans_conc_class(
num_states=len(obs_distns),
alpha_a_0=alpha_a_0,alpha_b_0=alpha_b_0,
trans_matrix=trans_matrix)
else:
self.trans_distn = self._trans_class(
num_states=len(obs_distns),alpha=alpha,trans_matrix=trans_matrix)
if init_state_distn is not None:
if init_state_distn == 'uniform':
self.init_state_distn = initial_state.UniformInitialState(model=self)
else:
self.init_state_distn = init_state_distn
else:
self.init_state_distn = self._init_state_class(
model=self,
init_state_concentration=init_state_concentration,
pi_0=pi_0)
self._clear_caches()
def plot_trans_distn(self, states_list):
fig, ax = plt.subplots()
tmat = self.trans_distn.trans_matrix
im, cbar = heatmap(tmat, states_list, states_list, ax=ax,
cmap="Blues", cbarlabel="Transition probability")
texts = annotate_heatmap(im, valfmt="{x:.2f} ")
fig.tight_layout()
plt.show()
return fig
def add_data(self,data,stateseq=None,fixed_stateseq=False,**kwargs):
self.states_list.append(
self._states_class(
model=self,data=data,
stateseq=stateseq, fixed_stateseq=fixed_stateseq,
**kwargs))
return self.states_list[-1]
def generate(self,T,keep=True):
s = self._states_class(model=self,T=T,initialize_from_prior=True)
data = self._generate_obs(s)
if keep:
self.states_list.append(s)
return data, s.stateseq
def _generate_obs(self,s):
if s.data is None:
# generating brand new data sequence
counts = np.bincount(s.stateseq,minlength=self.num_states)
obs = [iter(o.rvs(count)) for o, count in zip(s.obs_distns,counts)]
s.data = np.squeeze(np.vstack([next(obs[state]) for state in s.stateseq]))
else:
# filling in missing data
data = s.data
nan_idx, = np.where(np.isnan(atleast_2d(data)).any(1))
counts = np.bincount(s.stateseq[nan_idx],minlength=self.num_states)
obs = [iter(o.rvs(count)) for o, count in zip(s.obs_distns,counts)]
for idx, state in zip(nan_idx, s.stateseq[nan_idx]):
data[idx] = next(obs[state])
return s.data
def log_likelihood(self,data=None,**kwargs):
if data is not None:
if isinstance(data,np.ndarray):
self.add_data(data=data,generate=False,**kwargs)
return self.states_list.pop().log_likelihood()
else:
assert isinstance(data,list)
loglike = 0.
for d in data:
self.add_data(data=d,generate=False,**kwargs)
loglike += self.states_list.pop().log_likelihood()
return loglike
else:
return sum(s.log_likelihood() for s in self.states_list)
def predict(self,seed_data,timesteps,**kwargs):
padshape = (timesteps, seed_data.shape[1]) if seed_data.ndim == 2 else timesteps
full_data = np.concatenate((seed_data,np.nan*np.ones(padshape)))
self.add_data(full_data,**kwargs)
s = self.states_list.pop()
s.resample() # fills in states
return self._generate_obs(s), s.stateseq # fills in nan obs
def predictive_likelihoods(self,test_data,forecast_horizons,num_procs=None,**kwargs):
assert all(k > 0 for k in forecast_horizons)
self.add_data(data=test_data,**kwargs)
s = self.states_list.pop()
alphal = s.messages_forwards_log()
cmaxes = alphal.max(axis=1)
scaled_alphal = np.exp(alphal - cmaxes[:,None])
if not num_procs:
prev_k = 0
outs = []
for k in forecast_horizons:
step = k - prev_k
cmaxes = cmaxes[:-step]
scaled_alphal = scaled_alphal[:-step].dot(np.linalg.matrix_power(s.trans_matrix,step))
future_likelihoods = logsumexp(
np.log(scaled_alphal) + cmaxes[:,None] + s.aBl[k:],axis=1)
past_likelihoods = logsumexp(alphal[:-k],axis=1)
outs.append(future_likelihoods - past_likelihoods)
prev_k = k
else:
from joblib import Parallel, delayed
from . import parallel
parallel.cmaxes = cmaxes
parallel.alphal = alphal
parallel.scaled_alphal = scaled_alphal
parallel.trans_matrix = s.trans_matrix
parallel.aBl = s.aBl
outs = Parallel(n_jobs=num_procs,backend='multiprocessing')\
(delayed(parallel._get_predictive_likelihoods)(k)
for k in forecast_horizons)
return outs
@property
def stateseqs(self):
return [s.stateseq for s in self.states_list]
@property
def stateseqs_norep(self):
return [s.stateseq_norep for s in self.states_list]
@property
def durations(self):
return [s.durations for s in self.states_list]
@property
def datas(self):
return [s.data for s in self.states_list]
@property
def num_states(self):
return len(self.obs_distns)
@property
def num_parameters(self):
return sum(o.num_parameters() for o in self.obs_distns) \
+ self.num_states**2 - self.num_states
@property
def used_states(self):
'a list of the used states in the order they appear'
c = itertools.count()
canonical_ids = collections.defaultdict(lambda: next(c))
for s in self.states_list:
for state in s.stateseq:
canonical_ids[state]
return list(map(operator.itemgetter(0),
sorted(canonical_ids.items(),key=operator.itemgetter(1))))
@property
def state_usages(self):
if len(self.states_list) > 0:
state_usages = sum(np.bincount(s.stateseq,minlength=self.num_states)
for s in self.states_list)
return state_usages / state_usages.sum()
else:
return np.ones(self.num_states)
### predicting
def heldout_viterbi(self,data,**kwargs):
self.add_data(data=data,stateseq=np.zeros(len(data)),**kwargs)
s = self.states_list.pop()
s.Viterbi()
return s.stateseq
def heldout_state_marginals(self,data,**kwargs):
self.add_data(data=data,stateseq=np.zeros(len(data)),**kwargs)
s = self.states_list.pop()
s.E_step()
return s.expected_states
def _resample_from_mf(self):
self.trans_distn._resample_from_mf()
self.init_state_distn._resample_from_mf()
for o in self.obs_distns:
o._resample_from_mf()
### caching
def _clear_caches(self):
for s in self.states_list:
s.clear_caches()
def __getstate__(self):
self._clear_caches()
return self.__dict__.copy()
### plotting
_fig_sz = 6
def make_figure(self, fig_size=[12,6],**kwargs):
if len(self.states_list) <= 2:
fig = plt.figure(figsize=fig_size,**kwargs)
else:
fig = plt.figure(figsize=fig_size,**kwargs)
return fig
def plot(self,fig=None,plot_slice=slice(None),update=False,draw=True, fig_size=[12,6]):
update = update and (fig is not None)
fig = fig if fig else self.make_figure(fig_size=fig_size)
feature_ax, stateseq_axs = self._get_axes(fig)
try:
sp1_artists = self.plot_observations(feature_ax,plot_slice=plot_slice,update=update)
except IndexError:
sp1_artists = []
assert len(stateseq_axs) == len(self.states_list)
sp2_artists = \
[artist for s,ax,data in zip(self.states_list,stateseq_axs,self.datas)
for artist in self.plot_stateseq(s,ax,plot_slice,update=update,draw=False)]
if draw: plt.draw()
return sp1_artists + sp2_artists
def _get_axes(self,fig):
        # TODO is attaching these axes to the figure a good idea? why not save them
# here and reuse them if we recognize the figure being passed in
sz = self._fig_sz
if hasattr(fig,'_feature_ax') and hasattr(fig,'_stateseq_axs'):
return fig._feature_ax, fig._stateseq_axs
else:
if len(self.states_list) <= 2:
gs = GridSpec(sz+len(self.states_list),1)
feature_ax = plt.subplot(gs[:sz,:])
stateseq_axs = [plt.subplot(gs[sz+idx]) for idx in range(len(self.states_list))]
else:
gs = GridSpec(1,2)
sgs = GridSpecFromSubplotSpec(len(self.states_list),1,subplot_spec=gs[1])
feature_ax = plt.subplot(gs[0])
stateseq_axs = [plt.subplot(sgs[idx]) for idx in range(len(self.states_list))]
for ax in stateseq_axs:
ax.grid('off')
fig._feature_ax, fig._stateseq_axs = feature_ax, stateseq_axs
return feature_ax, stateseq_axs
def plot_observations(self,ax=None,color=None,plot_slice=slice(None),update=False):
ax = ax if ax else plt.gca()
state_colors = self._get_colors(color)
scatter_artists = self._plot_2d_data_scatter(ax,state_colors,plot_slice,update)
param_artists = self._plot_2d_obs_params(ax,state_colors,update)
return scatter_artists + param_artists
def _plot_2d_data_scatter(self,ax=None,state_colors=None,plot_slice=slice(None),update=False):
# TODO this is a special-case hack. breaks for 1D obs. only looks at
# first two components of ND obs.
# should only do this if the obs collection has a 2D_feature method
ax = ax if ax else plt.gca()
state_colors = state_colors if state_colors else self._get_colors()
artists = []
for s, data in zip(self.states_list,self.datas):
data = data[plot_slice]
colorseq = [state_colors[state] for state in s.stateseq[plot_slice]]
if update and hasattr(s,'_data_scatter'):
s._data_scatter.set_offsets(data[:,:2])
s._data_scatter.set_color(colorseq)
else:
s._data_scatter = ax.scatter(data[:,0],data[:,1],c=colorseq,s=5)
artists.append(s._data_scatter)
return artists
def _plot_2d_obs_params(self,ax=None,state_colors=None,update=False):
if not all(hasattr(o,'plot') for o in self.obs_distns):
return []
keepaxis = ax is not None
ax = ax if ax else plt.gca()
axis = ax.axis()
state_colors = state_colors if state_colors else self._get_colors()
usages = self.state_usages
artists = []
for state, (o, w) in enumerate(zip(self.obs_distns,usages)):
if o.D > 2:
if isinstance(o, Gaussian):
o = Gaussian(o.mu[:2], o.sigma[:2, :2])
else:
warn("High-dimensional distribution may not plot correctly in 2D")
artists.extend(
o.plot(
color=state_colors[state], label='%d' % state,
alpha=min(0.25,1.-(1.-w)**2)/0.25,
ax=ax, update=update,draw=False))
if keepaxis: ax.axis(axis)
return artists
def _get_colors(self,color=None,scalars=False,color_method=None):
color_method = color_method if color_method else 'usage'
if color is None:
cmap = cm.get_cmap('tab20')
if color_method == 'usage':
freqs = self.state_usages
used_states = sorted(self.used_states, key=lambda x: freqs[x], reverse=True)
elif color_method == 'order':
used_states = self.used_states
else:
raise ValueError("color_method must be 'usage' or 'order'")
unused_states = [idx for idx in range(self.num_states) if idx not in used_states]
#colorseq = np.random.RandomState(0).permutation(np.linspace(0,1,self.num_states))
colorseq = np.linspace(0, 1, self.num_states)
colors = dict((idx, v if scalars else cmap(v)) for idx, v in zip(sorted(self.used_states), colorseq))
#colors = dict((idx, v if scalars else cmap(v)) for idx, v in zip(used_states,colorseq))
for state in unused_states:
colors[state] = cmap(1.)
return colors
elif isinstance(color,dict):
return color
else:
return dict((idx,color) for idx in range(self.num_states))
def plot_stateseq(self,s,ax=None,plot_slice=slice(None),update=False,draw=True):
s = self.states_list[s] if isinstance(s,int) else s
ax = ax if ax else plt.gca()
state_colors = self._get_colors(scalars=True)
self._plot_stateseq_pcolor(s,ax,state_colors,plot_slice,update)
try:
data_values_artist = self._plot_stateseq_data_values(s,ax,state_colors,plot_slice,update)
except Exception:
data_values_artist = None
if draw: plt.draw()
return [data_values_artist]
def _plot_stateseq_pcolor(self,s,ax=None,state_colors=None,
plot_slice=slice(None),update=False,color_method=None):
from pyhsmm.util.general import rle
s = self.states_list[s] if isinstance(s,int) else s
ax = ax if ax else plt.gca()
state_colors = state_colors if state_colors \
else self._get_colors(scalars=True,color_method=color_method)
if update and hasattr(s,'_pcolor_im') and s._pcolor_im in ax.images:
s._pcolor_im.remove()
data = s.data[plot_slice]
stateseq = s.stateseq[plot_slice]
stateseq_norep, durations = rle(stateseq)
datamin, datamax = data.min(), data.max()
x, y = np.hstack((0,durations.cumsum())), np.array([datamin,datamax])
C = np.atleast_2d([state_colors[state] for state in stateseq_norep])
s._pcolor_im = ax.pcolormesh(x,y,C,vmin=0,vmax=1,alpha=0.9, cmap="tab20")
ax.set_ylim((datamin,datamax))
ax.set_xlim((0,len(stateseq)))
ax.set_yticks([])
ax.set_xticks([])
def _plot_stateseq_data_values(self,s,ax,state_colors,plot_slice,update):
from matplotlib.collections import LineCollection
from pyhsmm.util.general import AR_striding, rle
data = s.data[plot_slice]
stateseq = s.stateseq[plot_slice]
colorseq = np.tile(np.array([state_colors[state] for state in stateseq[:-1]]),data.shape[1])
if update and hasattr(s,'_data_lc'):
s._data_lc.set_array(colorseq)
else:
ts = np.arange(len(stateseq))
segments = np.vstack(
[AR_striding(np.hstack((ts[:,None], scalarseq[:,None])),1).reshape(-1,2,2)
for scalarseq in data.T])
lc = s._data_lc = LineCollection(segments)
lc.set_array(colorseq)
lc.set_linewidth(0.5)
ax.add_collection(lc)
return s._data_lc
class _HMMGibbsSampling(_HMMBase,ModelGibbsSampling):
@line_profiled
def resample_model(self,num_procs=0):
self.resample_parameters()
self.resample_states(num_procs=num_procs)
@line_profiled
def resample_parameters(self):
self.resample_obs_distns()
self.resample_trans_distn()
self.resample_init_state_distn()
def resample_obs_distns(self):
for state, distn in enumerate(self.obs_distns):
distn.resample([s.data[s.stateseq == state] for s in self.states_list])
self._clear_caches()
@line_profiled
def resample_trans_distn(self):
self.trans_distn.resample([s.stateseq for s in self.states_list])
self._clear_caches()
def resample_init_state_distn(self):
self.init_state_distn.resample([s.stateseq[0] for s in self.states_list])
self._clear_caches()
def resample_states(self,num_procs=0):
if num_procs == 0:
for s in self.states_list:
s.resample()
else:
self._joblib_resample_states(self.states_list,num_procs)
def copy_sample(self):
new = copy.copy(self)
new.obs_distns = [o.copy_sample() for o in self.obs_distns]
new.trans_distn = self.trans_distn.copy_sample()
new.init_state_distn = self.init_state_distn.copy_sample(new)
new.states_list = [s.copy_sample(new) for s in self.states_list]
return new
### joblib parallel legacy here
def _joblib_resample_states(self,states_list,num_procs):
from joblib import Parallel, delayed
from . import parallel
# warn('joblib is segfaulting on OS X only, not sure why')
if len(states_list) > 0:
joblib_args = list_split(
[self._get_joblib_pair(s) for s in states_list],
num_procs)
parallel.model = self
parallel.args = joblib_args
raw_stateseqs = Parallel(n_jobs=num_procs,backend='multiprocessing')\
(delayed(parallel._get_sampled_stateseq)(idx)
for idx in range(len(joblib_args)))
for s, (stateseq, log_likelihood) in zip(
[s for grp in list_split(states_list,num_procs) for s in grp],
[seq for grp in raw_stateseqs for seq in grp]):
s.stateseq, s._normalizer = stateseq, log_likelihood
def _get_joblib_pair(self,states_obj):
return (states_obj.data,states_obj._kwargs)
class _HMMMeanField(_HMMBase,ModelMeanField):
def meanfield_coordinate_descent_step(self,compute_vlb=True,num_procs=0):
# we want to update the states factor last to make the VLB
# computation efficient, but to update the parameters first we have to
# ensure everything in states_list has expected statistics computed
self._meanfield_update_states_list(
[s for s in self.states_list if not hasattr(s, 'expected_states')],
num_procs)
self.meanfield_update_parameters()
self.meanfield_update_states(num_procs)
if compute_vlb:
return self.vlb(states_last_updated=True)
def meanfield_update_parameters(self):
self.meanfield_update_obs_distns()
self.meanfield_update_trans_distn()
self.meanfield_update_init_state_distn()
def meanfield_update_obs_distns(self):
for state, o in enumerate(self.obs_distns):
o.meanfieldupdate(
[s.data for s in self.states_list],
[s.expected_states[:,state] for s in self.states_list])
self._clear_caches()
def meanfield_update_trans_distn(self):
self.trans_distn.meanfieldupdate(
[s.expected_transcounts for s in self.states_list])
self._clear_caches()
def meanfield_update_init_state_distn(self):
self.init_state_distn.meanfieldupdate(
[s.expected_states[0] for s in self.states_list])
self._clear_caches()
def meanfield_update_states(self,num_procs=0):
self._meanfield_update_states_list(self.states_list,num_procs=num_procs)
def _meanfield_update_states_list(self,states_list,num_procs=0):
if num_procs == 0:
for s in states_list:
s.meanfieldupdate()
else:
self._joblib_meanfield_update_states(states_list,num_procs)
def vlb(self, states_last_updated=False):
vlb = 0.
vlb += sum(s.get_vlb(states_last_updated) for s in self.states_list)
vlb += self.trans_distn.get_vlb()
vlb += self.init_state_distn.get_vlb()
vlb += sum(o.get_vlb() for o in self.obs_distns)
return vlb
### joblib parallel legacy
def _joblib_meanfield_update_states(self,states_list,num_procs):
if len(states_list) > 0:
from joblib import Parallel, delayed
from . import parallel
joblib_args = list_split(
[self._get_joblib_pair(s) for s in states_list],
num_procs)
parallel.model = self
parallel.args = joblib_args
allstats = Parallel(n_jobs=num_procs,backend='multiprocessing')\
(delayed(parallel._get_stats)(idx) for idx in range(len(joblib_args)))
for s, stats in zip(
                    [s for grp in list_split(states_list,num_procs) for s in grp],
[s for grp in allstats for s in grp]):
s.all_expected_stats = stats
def _get_joblib_pair(self,states_obj):
return (states_obj.data,states_obj._kwargs)
class _HMMSVI(_HMMBase,ModelMeanFieldSVI):
# NOTE: classes with this mixin should also have the _HMMMeanField mixin for
# joblib/multiprocessing legacy to work
def meanfield_sgdstep(self,minibatch,prob,stepsize,num_procs=0,**kwargs):
## compute the local mean field step for the minibatch
mb_states_list = self._get_mb_states_list(minibatch,**kwargs)
if num_procs == 0:
for s in mb_states_list:
s.meanfieldupdate()
else:
self._joblib_meanfield_update_states(mb_states_list,num_procs)
## take a global step on the parameters
self._meanfield_sgdstep_parameters(mb_states_list,prob,stepsize)
def _get_mb_states_list(self,minibatch,**kwargs):
minibatch = minibatch if isinstance(minibatch,list) else [minibatch]
mb_states_list = []
for mb in minibatch:
self.add_data(mb,generate=False,**kwargs)
mb_states_list.append(self.states_list.pop())
return mb_states_list
def _meanfield_sgdstep_parameters(self,mb_states_list,prob,stepsize):
self._meanfield_sgdstep_obs_distns(mb_states_list,prob,stepsize)
self._meanfield_sgdstep_trans_distn(mb_states_list,prob,stepsize)
self._meanfield_sgdstep_init_state_distn(mb_states_list,prob,stepsize)
def _meanfield_sgdstep_obs_distns(self,mb_states_list,prob,stepsize):
for state, o in enumerate(self.obs_distns):
o.meanfield_sgdstep(
[s.data for s in mb_states_list],
[s.expected_states[:,state] for s in mb_states_list],
prob,stepsize)
def _meanfield_sgdstep_trans_distn(self,mb_states_list,prob,stepsize):
self.trans_distn.meanfield_sgdstep(
[s.expected_transcounts for s in mb_states_list],
prob,stepsize)
def _meanfield_sgdstep_init_state_distn(self,mb_states_list,prob,stepsize):
self.init_state_distn.meanfield_sgdstep(
[s.expected_states[0] for s in mb_states_list],
prob,stepsize)
class _HMMEM(_HMMBase,ModelEM, ModelMAPEM):
def EM_step(self):
assert len(self.states_list) > 0, 'Must have data to run EM'
self._clear_caches()
self._E_step()
self._M_step()
def MAP_EM_step(self):
assert len(self.states_list) > 0, 'Must have data to run BEM'
self._clear_caches()
self._E_step()
self._BM_step()
def _BM_step(self):
self._BM_step_obs_distns()
self._BM_step_init_state_distn()
self._BM_step_trans_distn()
def _BM_step_obs_distns(self):
for state, distn in enumerate(self.obs_distns):
distn.MAP([s.data for s in self.states_list],
[s.expected_states[:,state] for s in self.states_list])
def _BM_step_init_state_distn(self):
self.init_state_distn.MAP(
expected_states_list=[s.expected_states[0] for s in self.states_list])
def _BM_step_trans_distn(self):
self.trans_distn.MAP(
expected_transcounts=[s.expected_transcounts for s in self.states_list])
def _E_step(self):
for s in self.states_list:
s.E_step()
def _M_step(self):
self._M_step_obs_distns()
self._M_step_init_state_distn()
self._M_step_trans_distn()
def _M_step_obs_distns(self):
for state, distn in enumerate(self.obs_distns):
distn.max_likelihood([s.data for s in self.states_list],
[s.expected_states[:,state] for s in self.states_list])
def _M_step_init_state_distn(self):
self.init_state_distn.max_likelihood(
expected_states_list=[s.expected_states[0] for s in self.states_list])
def _M_step_trans_distn(self):
self.trans_distn.max_likelihood(
expected_transcounts=[s.expected_transcounts for s in self.states_list])
def BIC(self,data=None):
'''
BIC on the passed data. If passed data is None (default), calculates BIC
on the model's assigned data
'''
# NOTE: in principle this method computes the BIC only after finding the
# maximum likelihood parameters (or, of course, an EM fixed-point as an
# approximation!)
        assert data is not None or len(self.states_list) > 0, 'Must have data to get BIC'
        if data is None:
            return -2*sum(self.log_likelihood(s.data).sum() for s in self.states_list) + \
                        self.num_parameters * np.log(
                            sum(s.data.shape[0] for s in self.states_list))
        else:
            return -2*self.log_likelihood(data) + self.num_parameters * np.log(data.shape[0])
class _HMMViterbiEM(_HMMBase,ModelMAPEM):
def Viterbi_EM_fit(self, tol=0.1, maxiter=20):
return self.MAP_EM_fit(tol, maxiter)
def Viterbi_EM_step(self):
assert len(self.states_list) > 0, 'Must have data to run Viterbi EM'
self._clear_caches()
self._Viterbi_E_step()
self._Viterbi_M_step()
def _Viterbi_E_step(self):
for s in self.states_list:
s.Viterbi()
def _Viterbi_M_step(self):
self._Viterbi_M_step_obs_distns()
self._Viterbi_M_step_init_state_distn()
self._Viterbi_M_step_trans_distn()
def _Viterbi_M_step_obs_distns(self):
for state, distn in enumerate(self.obs_distns):
distn.max_likelihood([s.data[s.stateseq == state] for s in self.states_list])
def _Viterbi_M_step_init_state_distn(self):
self.init_state_distn.max_likelihood(
samples=np.array([s.stateseq[0] for s in self.states_list]))
def _Viterbi_M_step_trans_distn(self):
self.trans_distn.max_likelihood([s.stateseq for s in self.states_list])
MAP_EM_step = Viterbi_EM_step # for the ModelMAPEM interface
class _WeakLimitHDPMixin(object):
def __init__(self,
obs_distns,
trans_distn=None,alpha=None,alpha_a_0=None,alpha_b_0=None,
gamma=None,gamma_a_0=None,gamma_b_0=None,trans_matrix=None,
**kwargs):
if trans_distn is not None:
trans_distn = trans_distn
elif not None in (alpha_a_0,alpha_b_0):
trans_distn = self._trans_conc_class(
num_states=len(obs_distns),
alpha_a_0=alpha_a_0,alpha_b_0=alpha_b_0,
gamma_a_0=gamma_a_0,gamma_b_0=gamma_b_0,
trans_matrix=trans_matrix)
else:
trans_distn = self._trans_class(
num_states=len(obs_distns),alpha=alpha,gamma=gamma,
trans_matrix=trans_matrix)
super(_WeakLimitHDPMixin,self).__init__(
obs_distns=obs_distns,trans_distn=trans_distn,**kwargs)
class _HMMPossibleChangepointsMixin(object):
_states_class = hmm_states.HMMStatesPossibleChangepoints
def add_data(self,data,changepoints=None,**kwargs):
return super(_HMMPossibleChangepointsMixin,self).add_data(
data=data,changepoints=changepoints,**kwargs)
def _get_mb_states_list(self,minibatch,changepoints=None,**kwargs):
if changepoints is not None:
if not isinstance(minibatch,(list,tuple)):
assert isinstance(minibatch,np.ndarray)
assert isinstance(changepoints,list) and isinstance(changepoints[0],tuple)
minibatch = [minibatch]
changepoints = [changepoints]
else:
assert isinstance(changepoints,(list,tuple)) \
and isinstance(changepoints[0],(list,tuple)) \
and isinstance(changepoints[0][0],tuple)
assert len(minibatch) == len(changepoints)
changepoints = changepoints if changepoints is not None \
else [None]*len(minibatch)
mb_states_list = []
for data, changes in zip(minibatch,changepoints):
self.add_data(data,changepoints=changes,generate=False,**kwargs)
mb_states_list.append(self.states_list.pop())
return mb_states_list
def log_likelihood(self,data=None,changepoints=None,**kwargs):
if data is not None:
if isinstance(data,np.ndarray):
assert isinstance(changepoints,list) or changepoints is None
self.add_data(data=data,changepoints=changepoints,
generate=False,**kwargs)
return self.states_list.pop().log_likelihood()
else:
assert isinstance(data,list) and (changepoints is None
or isinstance(changepoints,list) and len(changepoints) == len(data))
changepoints = changepoints if changepoints is not None \
else [None]*len(data)
loglike = 0.
for d, c in zip(data,changepoints):
self.add_data(data=d,changepoints=c,generate=False,**kwargs)
loglike += self.states_list.pop().log_likelihood()
return loglike
else:
return sum(s.log_likelihood() for s in self.states_list)
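# Parallel tempering support: exposes a temperature attribute and swaps parameters
# and state sequences with a model running at another temperature; the energy is the
# summed observation energy under the current state assignments.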
class _HMMParallelTempering(_HMMBase,ModelParallelTempering):
@property
def temperature(self):
return self._temperature if hasattr(self,'_temperature') else 1.
@temperature.setter
def temperature(self,T):
self._temperature = T
self._clear_caches()
def swap_sample_with(self,other):
self.obs_distns, other.obs_distns = other.obs_distns, self.obs_distns
self.trans_distn, other.trans_distn = other.trans_distn, self.trans_distn
self.init_state_distn, other.init_state_distn = \
other.init_state_distn, self.init_state_distn
self.init_state_distn.model = self
other.init_state_distn.model = other
for s1, s2 in zip(self.states_list, other.states_list):
s1.stateseq, s2.stateseq = s2.stateseq, s1.stateseq
self._clear_caches()
@property
def energy(self):
energy = 0.
for s in self.states_list:
for state, datum in zip(s.stateseq,s.data):
energy += self.obs_distns[state].energy(datum)
return energy
################
# HMM models #
################
class HMMPython(_HMMGibbsSampling,_HMMSVI,_HMMMeanField,_HMMEM,
_HMMViterbiEM,_HMMParallelTempering):
pass
class HMM(HMMPython):
_states_class = hmm_states.HMMStatesEigen
class WeakLimitHDPHMMPython(_WeakLimitHDPMixin,HMMPython):
# NOTE: shouldn't really inherit EM or ViterbiEM, but it's convenient!
_trans_class = transitions.WeakLimitHDPHMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHMMTransitionsConc
class WeakLimitHDPHMM(_WeakLimitHDPMixin,HMM):
_trans_class = transitions.WeakLimitHDPHMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHMMTransitionsConc
class DATruncHDPHMMPython(_WeakLimitHDPMixin,HMMPython):
# NOTE: weak limit mixin is poorly named; we just want its init method
_trans_class = transitions.DATruncHDPHMMTransitions
_trans_conc_class = None
class DATruncHDPHMM(_WeakLimitHDPMixin,HMM):
_trans_class = transitions.DATruncHDPHMMTransitions
_trans_conc_class = None
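# Sticky HDP-HMM in the weak-limit approximation: kappa puts extra prior mass on
# self-transitions. Either (alpha, gamma) or all four concentration hyperpriors
# must be supplied, but not both.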
class WeakLimitStickyHDPHMM(WeakLimitHDPHMM):
# TODO concentration resampling, too!
def __init__(self,obs_distns,
kappa=None,alpha=None,gamma=None,trans_matrix=None,
alpha_a_0=None,alpha_b_0=None,gamma_a_0=None,gamma_b_0=None,
**kwargs):
assert (None not in (alpha,gamma)) ^ \
(None not in (alpha_a_0,alpha_b_0,gamma_a_0,gamma_b_0))
if None not in (alpha,gamma):
trans_distn = transitions.WeakLimitStickyHDPHMMTransitions(
num_states=len(obs_distns),
kappa=kappa,alpha=alpha,gamma=gamma,trans_matrix=trans_matrix)
else:
trans_distn = transitions.WeakLimitStickyHDPHMMTransitionsConc(
num_states=len(obs_distns),
kappa=kappa,
alpha_a_0=alpha_a_0,alpha_b_0=alpha_b_0,
gamma_a_0=gamma_a_0,gamma_b_0=gamma_b_0,
trans_matrix=trans_matrix)
super(WeakLimitStickyHDPHMM,self).__init__(
obs_distns=obs_distns,trans_distn=trans_distn,**kwargs)
class HMMPossibleChangepoints(_HMMPossibleChangepointsMixin,HMM):
pass
#################
# HSMM Mixins #
#################
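# _HSMMBase extends _HMMBase with explicit state-duration distributions (dur_distns)
# and right/left censoring options when adding data.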
class _HSMMBase(_HMMBase):
_states_class = hsmm_states.HSMMStatesPython
_trans_class = transitions.HSMMTransitions
_trans_conc_class = transitions.HSMMTransitionsConc
# _init_steady_state_class = initial_state.HSMMSteadyState # TODO
def __init__(self,dur_distns,**kwargs):
self.dur_distns = dur_distns
super(_HSMMBase,self).__init__(**kwargs)
def add_data(self,data,stateseq=None,trunc=None,
right_censoring=True,left_censoring=False,**kwargs):
self.states_list.append(self._states_class(
model=self,
data=np.asarray(data),
stateseq=stateseq,
right_censoring=right_censoring,
left_censoring=left_censoring,
trunc=trunc,
**kwargs))
return self.states_list[-1]
def summary(self, state, state_list):
print('Model summary for state '+str(state_list[state]))
print('-----------------------------------------')
print(' Duration model')
print(' '+self.dur_distns[state].toString())
print('')
print(' Emission model')
print(' '+self.obs_distns[state].toString())
def total_summary(self, state_list):
print('Complete Model Summary')
print('###############################')
for state in range(len(state_list)):
self.summary(state, state_list)
print('')
self.plot_trans_distn(state_list)
@property
def num_parameters(self):
return sum(o.num_parameters() for o in self.obs_distns) \
+ sum(d.num_parameters() for d in self.dur_distns) \
+ self.num_states**2 - self.num_states
def plot_durations(self,colors=None,states_objs=None):
if colors is None:
colors = self._get_colors()
if states_objs is None:
states_objs = self.states_list
used_states = self.used_states
for state,d in enumerate(self.dur_distns):
if state in used_states:
d.plot(color=colors[state],
data=[s.durations[s.stateseq_norep == state]
for s in states_objs])
plt.title('Durations')
# def plot(self,color=None):
# plt.gcf() #.set_size_inches((10,10))
# colors = self._get_colors(self.states_list)
#
# num_subfig_cols = len(self.states_list)
# for subfig_idx,s in enumerate(self.states_list):
# plt.subplot(3,num_subfig_cols,1+subfig_idx)
# self.plot_observations(colors=colors,states_objs=[s])
#
# plt.subplot(3,num_subfig_cols,1+num_subfig_cols+subfig_idx)
# s.plot(colors_dict=colors)
#
# plt.subplot(3,num_subfig_cols,1+2*num_subfig_cols+subfig_idx)
# self.plot_durations(colors=colors,states_objs=[s])
class _HSMMGibbsSampling(_HSMMBase,_HMMGibbsSampling):
@line_profiled
def resample_parameters(self,**kwargs):
self.resample_dur_distns()
super(_HSMMGibbsSampling,self).resample_parameters(**kwargs)
def resample_dur_distns(self):
for state, distn in enumerate(self.dur_distns):
distn.resample_with_censoring_and_truncation(
data=
[s.durations_censored[s.untrunc_slice][s.stateseq_norep[s.untrunc_slice] == state]
for s in self.states_list],
censored_data=
[s.durations_censored[s.trunc_slice][s.stateseq_norep[s.trunc_slice] == state]
for s in self.states_list])
self._clear_caches()
def copy_sample(self):
new = super(_HSMMGibbsSampling,self).copy_sample()
new.dur_distns = [d.copy_sample() for d in self.dur_distns]
return new
class _HSMMEM(_HSMMBase,_HMMEM):
def _M_step(self):
super(_HSMMEM,self)._M_step()
self._M_step_dur_distns()
def _M_step_dur_distns(self):
for state, distn in enumerate(self.dur_distns):
distn.max_likelihood(
[np.arange(1,s.expected_durations[state].shape[0]+1)
for s in self.states_list],
[s.expected_durations[state] for s in self.states_list])
class _HSMMMeanField(_HSMMBase,_HMMMeanField):
def meanfield_update_parameters(self):
super(_HSMMMeanField,self).meanfield_update_parameters()
self.meanfield_update_dur_distns()
def meanfield_update_dur_distns(self):
for state, d in enumerate(self.dur_distns):
d.meanfieldupdate(
[np.arange(1,s.expected_durations[state].shape[0]+1)
for s in self.states_list],
[s.expected_durations[state] for s in self.states_list])
def vlb(self, **kwargs):
vlb = super(_HSMMMeanField,self).vlb(**kwargs)
vlb += sum(d.get_vlb() for d in self.dur_distns)
return vlb
class _HSMMSVI(_HSMMBase,_HMMSVI):
def _meanfield_sgdstep_parameters(self,mb_states_list,prob,stepsize):
super(_HSMMSVI,self)._meanfield_sgdstep_parameters(mb_states_list,prob,stepsize)
self._meanfield_sgdstep_dur_distns(mb_states_list,prob,stepsize)
def _meanfield_sgdstep_dur_distns(self,mb_states_list,prob,stepsize):
for state, d in enumerate(self.dur_distns):
d.meanfield_sgdstep(
[np.arange(1,s.expected_durations[state].shape[0]+1)
for s in mb_states_list],
[s.expected_durations[state] for s in mb_states_list],
prob,stepsize)
class _HSMMINBEMMixin(_HMMEM,ModelEM):
def EM_step(self):
super(_HSMMINBEMMixin,self).EM_step()
for state, distn in enumerate(self.dur_distns):
distn.max_likelihood(data=None,stats=(
sum(s.expected_dur_ns[state] for s in self.states_list),
sum(s.expected_dur_tots[state] for s in self.states_list)))
class _HSMMViterbiEM(_HSMMBase,_HMMViterbiEM):
def Viterbi_EM_step(self):
super(_HSMMViterbiEM,self).Viterbi_EM_step()
self._Viterbi_M_step_dur_distns()
def _Viterbi_M_step_dur_distns(self):
for state, distn in enumerate(self.dur_distns):
distn.max_likelihood(
[s.durations[s.stateseq_norep == state] for s in self.states_list])
def _Viterbi_M_step_trans_distn(self):
self.trans_distn.max_likelihood([s.stateseq_norep for s in self.states_list])
class _HSMMPossibleChangepointsMixin(_HMMPossibleChangepointsMixin):
_states_class = hsmm_states.HSMMStatesPossibleChangepoints
class _HSMMParallelTempering(_HSMMBase,_HMMParallelTempering):
def swap_sample_with(self,other):
self.dur_distns, other.dur_distns = other.dur_distns, self.dur_distns
super(_HSMMParallelTempering,self).swap_sample_with(other)
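# Mixin for delayed duration models: durations are resampled after subtracting the
# per-state delay from both the uncensored and the censored durations.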
class _DelayedMixin(object):
def resample_dur_distns(self):
for state, distn in enumerate(self.dur_distns):
distn.resample_with_censoring_and_truncation(
data=
[s.durations_censored[s.untrunc_slice][s.stateseq_norep[s.untrunc_slice] == state]
- s.delays[state] for s in self.states_list],
censored_data=
[s.durations_censored[s.trunc_slice][s.stateseq_norep[s.trunc_slice] == state]
- s.delays[state] for s in self.states_list])
self._clear_caches()
#################
# HSMM Models #
#################
class HSMMPython(_HSMMGibbsSampling,_HSMMSVI,_HSMMMeanField,
_HSMMViterbiEM,_HSMMEM,_HSMMParallelTempering):
_trans_class = transitions.HSMMTransitions
_trans_conc_class = transitions.HSMMTransitionsConc
class HSMM(HSMMPython):
_states_class = hsmm_states.HSMMStatesEigen
class GeoHSMM(HSMMPython):
_states_class = hsmm_states.GeoHSMMStates
class DelayedGeoHSMM(_DelayedMixin,HSMMPython):
_states_class = hsmm_states.DelayedGeoHSMMStates
class WeakLimitHDPHSMMPython(_WeakLimitHDPMixin,HSMMPython):
# NOTE: shouldn't technically inherit EM or ViterbiEM, but it's convenient
_trans_class = transitions.WeakLimitHDPHSMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHSMMTransitionsConc
class WeakLimitHDPHSMM(_WeakLimitHDPMixin,HSMM):
_trans_class = transitions.WeakLimitHDPHSMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHSMMTransitionsConc
class WeakLimitGeoHDPHSMM(WeakLimitHDPHSMM):
_states_class = hsmm_states.GeoHSMMStates
def _M_step_dur_distns(self):
warn('untested!')
for state, distn in enumerate(self.dur_distns):
distn.max_likelihood(
stats=(
sum(s._expected_ns[state] for s in self.states_list),
sum(s._expected_tots[state] for s in self.states_list),
))
class WeakLimitDelayedGeoHSMM(_DelayedMixin,WeakLimitHDPHSMM):
_states_class = hsmm_states.DelayedGeoHSMMStates
class DATruncHDPHSMM(_WeakLimitHDPMixin,HSMM):
# NOTE: weak limit mixin is poorly named; we just want its init method
_trans_class = transitions.DATruncHDPHSMMTransitions
_trans_conc_class = None
class HSMMIntNegBin(_HSMMGibbsSampling,_HSMMMeanField,_HSMMSVI,_HSMMViterbiEM,
_HSMMParallelTempering):
_trans_class = transitions.HSMMTransitions
_trans_conc_class = transitions.HSMMTransitionsConc
_states_class = hsmm_inb_states.HSMMStatesIntegerNegativeBinomial
def _resample_from_mf(self):
super(HSMMIntNegBin,self)._resample_from_mf()
for d in self.dur_distns:
d._resample_from_mf()
def _vlb(self):
return 0. # TODO
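# Predictive likelihoods use the HMM embedding of this model: forward messages are
# propagated k steps with powers of the embedded HMM transition matrix and compared
# against the likelihood of the data observed so far.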
def predictive_likelihoods(self,test_data,forecast_horizons,**kwargs):
self.add_data(data=test_data,**kwargs)
s = self.states_list.pop()
alphal = s.hmm_messages_forwards_log()
cmaxes = alphal.max(axis=1)
scaled_alphal = np.exp(alphal - cmaxes[:,None])
prev_k = 0
outs = []
for k in forecast_horizons:
step = k - prev_k
cmaxes = cmaxes[:-step]
scaled_alphal = scaled_alphal[:-step].dot(np.linalg.matrix_power(s.hmm_trans_matrix,step))
future_likelihoods = logsumexp(
np.log(scaled_alphal) + cmaxes[:,None] + s.hmm_aBl[k:],axis=1)
past_likelihoods = logsumexp(alphal[:-k],axis=1)
outs.append(future_likelihoods - past_likelihoods)
prev_k = k
return outs
class WeakLimitHDPHSMMIntNegBin(_WeakLimitHDPMixin,HSMMIntNegBin):
_trans_class = transitions.WeakLimitHDPHSMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHSMMTransitionsConc
class HSMMIntNegBinVariant(_HSMMGibbsSampling,_HSMMINBEMMixin,_HSMMViterbiEM,
_HSMMParallelTempering):
_trans_class = transitions.HSMMTransitions
_trans_conc_class = transitions.HSMMTransitionsConc
_states_class = hsmm_inb_states.HSMMStatesIntegerNegativeBinomialVariant
class WeakLimitHDPHSMMIntNegBinVariant(_WeakLimitHDPMixin,HSMMIntNegBinVariant):
_trans_class = transitions.WeakLimitHDPHSMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHSMMTransitionsConc
class GeoHSMMPossibleChangepoints(_HSMMPossibleChangepointsMixin,GeoHSMM):
pass
class HSMMPossibleChangepoints(_HSMMPossibleChangepointsMixin,HSMMPython):
pass
class WeakLimitHDPHSMMPossibleChangepoints(_HSMMPossibleChangepointsMixin,WeakLimitHDPHSMM):
pass
class WeakLimitHDPHSMMDelayedIntNegBin(_DelayedMixin,_WeakLimitHDPMixin,HSMMIntNegBin):
_states_class = hsmm_inb_states.HSMMStatesDelayedIntegerNegativeBinomial
_trans_class = transitions.WeakLimitHDPHSMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHSMMTransitionsConc
def __init__(self,dur_distns,delay=0,**kwargs):
for d in dur_distns:
d.delay = delay
super(WeakLimitHDPHSMMDelayedIntNegBin,self).__init__(dur_distns=dur_distns,**kwargs)
class WeakLimitHDPHSMMTruncatedIntNegBin(_WeakLimitHDPMixin,HSMMIntNegBin):
_states_class = hsmm_inb_states.HSMMStatesTruncatedIntegerNegativeBinomial
_trans_class = transitions.WeakLimitHDPHSMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHSMMTransitionsConc
def __init__(self,dur_distns,delay=0,**kwargs):
for d in dur_distns:
d.delay = delay
super(WeakLimitHDPHSMMTruncatedIntNegBin,self).__init__(dur_distns=dur_distns,**kwargs)
def resample_dur_distns(self):
for state, distn in enumerate(self.dur_distns):
distn.resample_with_censoring_and_truncation(
# regular data
data =
[s.durations_censored[s.untrunc_slice][s.stateseq_norep[s.untrunc_slice] == state]
for s in self.states_list],
# right censoring due to HSMM states
censored_data =
[s.durations_censored[s.trunc_slice][s.stateseq_norep[s.trunc_slice] == state]
for s in self.states_list],
# left truncation level
left_truncation_level = distn.delay,
)
self._clear_caches()
##########
# meta #
##########
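# Mixin giving each data group (keyed by a states object's group_id) its own
# transition and initial-state distribution, stored in defaultdicts that deep-copy a
# prototype distribution on first access.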
class _SeparateTransMixin(object):
def __init__(self,*args,**kwargs):
super(_SeparateTransMixin,self).__init__(*args,**kwargs)
make_factory = (lambda distn: lambda: copy.deepcopy(distn))
self.trans_distns = collections.defaultdict(make_factory(self.trans_distn))
self._trans_distn_prototype = self.trans_distn
del self.trans_distn
self.init_state_distns = collections.defaultdict(make_factory(self.init_state_distn))
self._init_state_distn_prototype = self.init_state_distn
del self.init_state_distn
def __getstate__(self):
dct = self.__dict__.copy()
dct['trans_distns'] = dict(self.trans_distns.items())
dct['init_state_distns'] = dict(self.init_state_distns.items())
return dct
def __setstate__(self,dct):
self.__dict__.update(dct)
self.trans_distns = collections.defaultdict(
lambda: copy.deepcopy(self._trans_distn_prototype))
self.init_state_distns = collections.defaultdict(
lambda: copy.deepcopy(self._init_state_distn_prototype))
self.trans_distns.update(dct['trans_distns'])
self.init_state_distns.update(dct['init_state_distns'])
### parallel tempering
def swap_sample_with(self,other):
self.trans_distns, other.trans_distns = self.trans_distns, other.trans_distns
self.init_state_distns, other.init_state_distns = \
other.init_state_distns, self.init_state_distns
for d1, d2 in zip(self.init_state_distns.values(),other.init_state_distns.values()):
d1.model = self
d2.model = other
super(_SeparateTransMixin,self).swap_sample_with(other)
### Gibbs sampling
def resample_trans_distn(self):
for group_id, trans_distn in iteritems(self.trans_distns):
trans_distn.resample([s.stateseq for s in self.states_list
if hash(s.group_id) == hash(group_id)])
self._clear_caches()
def resample_init_state_distn(self):
for group_id, init_state_distn in iteritems(self.init_state_distns):
init_state_distn.resample([s.stateseq[0] for s in self.states_list
if hash(s.group_id) == hash(group_id)])
self._clear_caches()
### Mean field
def meanfield_update_trans_distn(self):
for group_id, trans_distn in iteritems(self.trans_distns):
states_list = [s for s in self.states_list if hash(s.group_id) == hash(group_id)]
if len(states_list) > 0:
trans_distn.meanfieldupdate([s.expected_transcounts for s in states_list])
def meanfield_update_init_state_distn(self):
for group_id, init_state_distn in iteritems(self.init_state_distns):
states_list = [s for s in self.states_list if hash(s.group_id) == hash(group_id)]
if len(states_list) > 0:
init_state_distn.meanfieldupdate([s.expected_states[0] for s in states_list])
def _vlb(self):
vlb = 0.
vlb += sum(s.get_vlb() for s in self.states_list)
vlb += sum(trans_distn.get_vlb()
for trans_distn in itervalues(self.trans_distns))
vlb += sum(init_state_distn.get_vlb()
for init_state_distn in itervalues(self.init_state_distns))
vlb += sum(o.get_vlb() for o in self.obs_distns)
return vlb
### SVI
def _meanfield_sgdstep_trans_distn(self,mb_states_list,prob,stepsize):
for group_id, trans_distn in iteritems(self.trans_distns):
trans_distn.meanfield_sgdstep(
[s.expected_transcounts for s in mb_states_list
if hash(s.group_id) == hash(group_id)],
prob,stepsize)
def _meanfield_sgdstep_init_state_distn(self,mb_states_list,prob,stepsize):
for group_id, init_state_distn in iteritems(self.init_state_distns):
init_state_distn.meanfield_sgdstep(
[s.expected_states[0] for s in mb_states_list
if hash(s.group_id) == hash(group_id)],
prob,stepsize)
### EM
def EM_step(self):
raise NotImplementedError
### Viterbi
def Viterbi_EM_step(self):
raise NotImplementedError
class HMMSeparateTrans(_SeparateTransMixin,HMM):
_states_class = hmm_states.HMMStatesEigenSeparateTrans
class WeakLimitHDPHMMSeparateTrans(_SeparateTransMixin,WeakLimitHDPHMM):
_states_class = hmm_states.HMMStatesEigenSeparateTrans
class WeakLimitStickyHDPHMMSeparateTrans(_SeparateTransMixin,WeakLimitStickyHDPHMM):
_states_class = hmm_states.HMMStatesEigenSeparateTrans
class WeakLimitHDPHSMMSeparateTrans(_SeparateTransMixin,WeakLimitHDPHSMM):
_states_class = hsmm_states.HSMMStatesSeparateTrans
class HSMMPossibleChangepointsSeparateTrans(
_SeparateTransMixin,
HSMMPossibleChangepoints):
_states_class = hsmm_states.HSMMStatesPossibleChangepointsSeparateTrans
class WeakLimitHDPHSMMPossibleChangepointsSeparateTrans(
_SeparateTransMixin,
WeakLimitHDPHSMMPossibleChangepoints):
_states_class = hsmm_states.HSMMStatesPossibleChangepointsSeparateTrans
class WeakLimitHDPHSMMIntNegBinSeparateTrans(
_SeparateTransMixin,
WeakLimitHDPHSMMIntNegBin):
_states_class = hsmm_inb_states.HSMMStatesIntegerNegativeBinomialSeparateTrans
class WeakLimitHDPHSMMDelayedIntNegBinSeparateTrans(
_SeparateTransMixin,
WeakLimitHDPHSMMDelayedIntNegBin):
_states_class = hsmm_inb_states.HSMMStatesDelayedIntegerNegativeBinomialSeparateTrans
# TODO is this method needed?
def resample_dur_distns(self):
for state, distn in enumerate(self.dur_distns):
distn.resample_with_censoring_and_truncation(
data=
[s.durations_censored[s.untrunc_slice][s.stateseq_norep[s.untrunc_slice] == state]
- s.delays[state] for s in self.states_list],
censored_data=
[s.durations_censored[s.trunc_slice][s.stateseq_norep[s.trunc_slice] == state]
- s.delays[state] for s in self.states_list])
self._clear_caches()
class WeakLimitHDPHSMMTruncatedIntNegBinSeparateTrans(
_SeparateTransMixin,
WeakLimitHDPHSMMTruncatedIntNegBin):
_states_class = hsmm_inb_states.HSMMStatesTruncatedIntegerNegativeBinomialSeparateTrans
|
[
"matplotlib.pyplot.title",
"matplotlib.cm.get_cmap",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.exp",
"scipy.special.logsumexp",
"matplotlib.pyplot.gca",
"numpy.atleast_2d",
"pybasicbayes.util.stats.atleast_2d",
"joblib.Parallel",
"matplotlib.pyplot.draw",
"numpy.linspace",
"numpy.bincount",
"pyhsmm.util.general.rle",
"future.utils.iteritems",
"matplotlib.pyplot.subplots",
"matplotlib.collections.LineCollection",
"copy.deepcopy",
"matplotlib.pyplot.show",
"future.utils.itervalues",
"numpy.asarray",
"itertools.count",
"numpy.hstack",
"pyhsmm.util.general.list_split",
"numpy.linalg.matrix_power",
"joblib.delayed",
"matplotlib.pyplot.subplot",
"numpy.log",
"pyhsmm.util.plot.heatmap",
"copy.copy",
"pybasicbayes.distributions.gaussian.Gaussian",
"numpy.array",
"pyhsmm.util.plot.annotate_heatmap",
"builtins.zip",
"pyhsmm.internals.initial_state.UniformInitialState",
"warnings.warn",
"matplotlib.gridspec.GridSpec",
"operator.itemgetter"
] |
[((2496, 2510), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2508, 2510), True, 'import matplotlib.pyplot as plt\n'), ((2577, 2678), 'pyhsmm.util.plot.heatmap', 'heatmap', (['tmat', 'states_list', 'states_list'], {'ax': 'ax', 'cmap': '"""Blues"""', 'cbarlabel': '"""Transition probability"""'}), "(tmat, states_list, states_list, ax=ax, cmap='Blues', cbarlabel=\n 'Transition probability')\n", (2584, 2678), False, 'from pyhsmm.util.plot import annotate_heatmap, heatmap\n'), ((2717, 2756), 'pyhsmm.util.plot.annotate_heatmap', 'annotate_heatmap', (['im'], {'valfmt': '"""{x:.2f} """'}), "(im, valfmt='{x:.2f} ')\n", (2733, 2756), False, 'from pyhsmm.util.plot import annotate_heatmap, heatmap\n'), ((2793, 2803), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2801, 2803), True, 'import matplotlib.pyplot as plt\n'), ((5522, 5554), 'numpy.exp', 'np.exp', (['(alphal - cmaxes[:, None])'], {}), '(alphal - cmaxes[:, None])\n', (5528, 5554), True, 'import numpy as np\n'), ((7399, 7416), 'itertools.count', 'itertools.count', ([], {}), '()\n', (7414, 7416), False, 'import itertools\n'), ((11808, 11841), 'builtins.zip', 'zip', (['self.states_list', 'self.datas'], {}), '(self.states_list, self.datas)\n', (11811, 11841), False, 'from builtins import map, zip\n'), ((15743, 15756), 'pyhsmm.util.general.rle', 'rle', (['stateseq'], {}), '(stateseq)\n', (15746, 15756), False, 'from pyhsmm.util.general import AR_striding, rle\n'), ((15898, 15962), 'numpy.atleast_2d', 'np.atleast_2d', (['[state_colors[state] for state in stateseq_norep]'], {}), '([state_colors[state] for state in stateseq_norep])\n', (15911, 15962), True, 'import numpy as np\n'), ((18200, 18215), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (18209, 18215), False, 'import copy\n'), ((30659, 30687), 'builtins.zip', 'zip', (['minibatch', 'changepoints'], {}), '(minibatch, changepoints)\n', (30662, 30687), False, 'from builtins import map, zip\n'), ((32619, 32659), 'builtins.zip', 'zip', (['self.states_list', 'other.states_list'], {}), '(self.states_list, other.states_list)\n', (32622, 32659), False, 'from builtins import map, zip\n'), ((37465, 37487), 'matplotlib.pyplot.title', 'plt.title', (['"""Durations"""'], {}), "('Durations')\n", (37474, 37487), True, 'import matplotlib.pyplot as plt\n'), ((43791, 43808), 'warnings.warn', 'warn', (['"""untested!"""'], {}), "('untested!')\n", (43795, 43808), False, 'from warnings import warn\n'), ((45196, 45228), 'numpy.exp', 'np.exp', (['(alphal - cmaxes[:, None])'], {}), '(alphal - cmaxes[:, None])\n', (45202, 45228), True, 'import numpy as np\n'), ((50302, 50330), 'future.utils.iteritems', 'iteritems', (['self.trans_distns'], {}), '(self.trans_distns)\n', (50311, 50330), False, 'from future.utils import iteritems, itervalues\n'), ((50572, 50605), 'future.utils.iteritems', 'iteritems', (['self.init_state_distns'], {}), '(self.init_state_distns)\n', (50581, 50605), False, 'from future.utils import iteritems, itervalues\n'), ((50873, 50901), 'future.utils.iteritems', 'iteritems', (['self.trans_distns'], {}), '(self.trans_distns)\n', (50882, 50901), False, 'from future.utils import iteritems, itervalues\n'), ((51217, 51250), 'future.utils.iteritems', 'iteritems', (['self.init_state_distns'], {}), '(self.init_state_distns)\n', (51226, 51250), False, 'from future.utils import iteritems, itervalues\n'), ((52004, 52032), 'future.utils.iteritems', 'iteritems', (['self.trans_distns'], {}), '(self.trans_distns)\n', (52013, 52032), False, 'from future.utils import iteritems, 
itervalues\n'), ((52367, 52400), 'future.utils.iteritems', 'iteritems', (['self.init_state_distns'], {}), '(self.init_state_distns)\n', (52376, 52400), False, 'from future.utils import iteritems, itervalues\n'), ((3511, 3561), 'numpy.bincount', 'np.bincount', (['s.stateseq'], {'minlength': 'self.num_states'}), '(s.stateseq, minlength=self.num_states)\n', (3522, 3561), True, 'import numpy as np\n'), ((3894, 3953), 'numpy.bincount', 'np.bincount', (['s.stateseq[nan_idx]'], {'minlength': 'self.num_states'}), '(s.stateseq[nan_idx], minlength=self.num_states)\n', (3905, 3953), True, 'import numpy as np\n'), ((4063, 4096), 'builtins.zip', 'zip', (['nan_idx', 's.stateseq[nan_idx]'], {}), '(nan_idx, s.stateseq[nan_idx])\n', (4066, 4096), False, 'from builtins import map, zip\n'), ((8009, 8033), 'numpy.ones', 'np.ones', (['self.num_states'], {}), '(self.num_states)\n', (8016, 8033), True, 'import numpy as np\n'), ((9012, 9050), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'fig_size'}), '(figsize=fig_size, **kwargs)\n', (9022, 9050), True, 'import matplotlib.pyplot as plt\n'), ((9082, 9120), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'fig_size'}), '(figsize=fig_size, **kwargs)\n', (9092, 9120), True, 'import matplotlib.pyplot as plt\n'), ((9843, 9853), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (9851, 9853), True, 'import matplotlib.pyplot as plt\n'), ((11090, 11099), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11097, 11099), True, 'import matplotlib.pyplot as plt\n'), ((11677, 11686), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11684, 11686), True, 'import matplotlib.pyplot as plt\n'), ((12512, 12521), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12519, 12521), True, 'import matplotlib.pyplot as plt\n'), ((12720, 12748), 'builtins.zip', 'zip', (['self.obs_distns', 'usages'], {}), '(self.obs_distns, usages)\n', (12723, 12748), False, 'from builtins import map, zip\n'), ((13456, 13476), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""tab20"""'], {}), "('tab20')\n", (13467, 13476), False, 'from matplotlib import cm\n'), ((14049, 14083), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.num_states'], {}), '(0, 1, self.num_states)\n', (14060, 14083), True, 'import numpy as np\n'), ((14728, 14737), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (14735, 14737), True, 'import matplotlib.pyplot as plt\n'), ((15062, 15072), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (15070, 15072), True, 'import matplotlib.pyplot as plt\n'), ((15375, 15384), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (15382, 15384), True, 'import matplotlib.pyplot as plt\n'), ((15858, 15886), 'numpy.array', 'np.array', (['[datamin, datamax]'], {}), '([datamin, datamax])\n', (15866, 15886), True, 'import numpy as np\n'), ((16475, 16533), 'numpy.array', 'np.array', (['[state_colors[state] for state in stateseq[:-1]]'], {}), '([state_colors[state] for state in stateseq[:-1]])\n', (16483, 16533), True, 'import numpy as np\n'), ((16895, 16919), 'matplotlib.collections.LineCollection', 'LineCollection', (['segments'], {}), '(segments)\n', (16909, 16919), False, 'from matplotlib.collections import LineCollection\n'), ((32879, 32902), 'builtins.zip', 'zip', (['s.stateseq', 's.data'], {}), '(s.stateseq, s.data)\n', (32882, 32902), False, 'from builtins import map, zip\n'), ((45630, 45660), 'scipy.special.logsumexp', 'logsumexp', (['alphal[:-k]'], {'axis': '(1)'}), '(alphal[:-k], axis=1)\n', (45639, 45660), False, 'from scipy.special import 
logsumexp\n'), ((2073, 2118), 'pyhsmm.internals.initial_state.UniformInitialState', 'initial_state.UniformInitialState', ([], {'model': 'self'}), '(model=self)\n', (2106, 2118), False, 'from pyhsmm.internals import hmm_states, hsmm_states, hsmm_inb_states, initial_state, transitions\n'), ((6006, 6036), 'scipy.special.logsumexp', 'logsumexp', (['alphal[:-k]'], {'axis': '(1)'}), '(alphal[:-k], axis=1)\n', (6015, 6036), False, 'from scipy.special import logsumexp\n'), ((6459, 6512), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_procs', 'backend': '"""multiprocessing"""'}), "(n_jobs=num_procs, backend='multiprocessing')\n", (6467, 6512), False, 'from joblib import Parallel, delayed\n'), ((7615, 7637), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (7634, 7637), False, 'import operator\n'), ((9687, 9734), 'builtins.zip', 'zip', (['self.states_list', 'stateseq_axs', 'self.datas'], {}), '(self.states_list, stateseq_axs, self.datas)\n', (9690, 9734), False, 'from builtins import map, zip\n'), ((10380, 10403), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:sz, :]'], {}), '(gs[:sz, :])\n', (10391, 10403), True, 'import matplotlib.pyplot as plt\n'), ((10539, 10553), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (10547, 10553), False, 'from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec\n'), ((10673, 10691), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (10684, 10691), True, 'import matplotlib.pyplot as plt\n'), ((19022, 19075), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_procs', 'backend': '"""multiprocessing"""'}), "(n_jobs=num_procs, backend='multiprocessing')\n", (19030, 19075), False, 'from joblib import Parallel, delayed\n'), ((22220, 22273), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_procs', 'backend': '"""multiprocessing"""'}), "(n_jobs=num_procs, backend='multiprocessing')\n", (22228, 22273), False, 'from joblib import Parallel, delayed\n'), ((28334, 28385), 'numpy.array', 'np.array', (['[s.stateseq[0] for s in self.states_list]'], {}), '([s.stateseq[0] for s in self.states_list])\n', (28342, 28385), True, 'import numpy as np\n'), ((31606, 31629), 'builtins.zip', 'zip', (['data', 'changepoints'], {}), '(data, changepoints)\n', (31609, 31629), False, 'from builtins import map, zip\n'), ((45422, 45470), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['s.hmm_trans_matrix', 'step'], {}), '(s.hmm_trans_matrix, step)\n', (45444, 45470), True, 'import numpy as np\n'), ((48674, 48694), 'copy.deepcopy', 'copy.deepcopy', (['distn'], {}), '(distn)\n', (48687, 48694), False, 'import copy\n'), ((49420, 49462), 'copy.deepcopy', 'copy.deepcopy', (['self._trans_distn_prototype'], {}), '(self._trans_distn_prototype)\n', (49433, 49462), False, 'import copy\n'), ((49546, 49593), 'copy.deepcopy', 'copy.deepcopy', (['self._init_state_distn_prototype'], {}), '(self._init_state_distn_prototype)\n', (49559, 49593), False, 'import copy\n'), ((3615, 3640), 'builtins.zip', 'zip', (['s.obs_distns', 'counts'], {}), '(s.obs_distns, counts)\n', (3618, 3640), False, 'from builtins import map, zip\n'), ((4007, 4032), 'builtins.zip', 'zip', (['s.obs_distns', 'counts'], {}), '(s.obs_distns, counts)\n', (4010, 4032), False, 'from builtins import map, zip\n'), ((4986, 5003), 'numpy.ones', 'np.ones', (['padshape'], {}), '(padshape)\n', (4993, 5003), True, 'import numpy as np\n'), ((5798, 5842), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['s.trans_matrix', 'step'], {}), '(s.trans_matrix, 
step)\n', (5820, 5842), True, 'import numpy as np\n'), ((7826, 7876), 'numpy.bincount', 'np.bincount', (['s.stateseq'], {'minlength': 'self.num_states'}), '(s.stateseq, minlength=self.num_states)\n', (7837, 7876), True, 'import numpy as np\n'), ((10435, 10460), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[sz + idx]'], {}), '(gs[sz + idx])\n', (10446, 10460), True, 'import matplotlib.pyplot as plt\n'), ((10724, 10745), 'matplotlib.pyplot.subplot', 'plt.subplot', (['sgs[idx]'], {}), '(sgs[idx])\n', (10735, 10745), True, 'import matplotlib.pyplot as plt\n'), ((12842, 12877), 'pybasicbayes.distributions.gaussian.Gaussian', 'Gaussian', (['o.mu[:2]', 'o.sigma[:2, :2]'], {}), '(o.mu[:2], o.sigma[:2, :2])\n', (12850, 12877), False, 'from pybasicbayes.distributions.gaussian import Gaussian\n'), ((12920, 12986), 'warnings.warn', 'warn', (['"""High-dimensional distribution may not plot correctly in 2D"""'], {}), "('High-dimensional distribution may not plot correctly in 2D')\n", (12924, 12986), False, 'from warnings import warn\n'), ((27408, 27429), 'numpy.log', 'np.log', (['data.shape[0]'], {}), '(data.shape[0])\n', (27414, 27429), True, 'import numpy as np\n'), ((35851, 35867), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (35861, 35867), True, 'import numpy as np\n'), ((39304, 39358), 'numpy.arange', 'np.arange', (['(1)', '(s.expected_durations[state].shape[0] + 1)'], {}), '(1, s.expected_durations[state].shape[0] + 1)\n', (39313, 39358), True, 'import numpy as np\n'), ((39833, 39887), 'numpy.arange', 'np.arange', (['(1)', '(s.expected_durations[state].shape[0] + 1)'], {}), '(1, s.expected_durations[state].shape[0] + 1)\n', (39842, 39887), True, 'import numpy as np\n'), ((40629, 40683), 'numpy.arange', 'np.arange', (['(1)', '(s.expected_durations[state].shape[0] + 1)'], {}), '(1, s.expected_durations[state].shape[0] + 1)\n', (40638, 40683), True, 'import numpy as np\n'), ((51649, 51678), 'future.utils.itervalues', 'itervalues', (['self.trans_distns'], {}), '(self.trans_distns)\n', (51659, 51678), False, 'from future.utils import iteritems, itervalues\n'), ((51766, 51800), 'future.utils.itervalues', 'itervalues', (['self.init_state_distns'], {}), '(self.init_state_distns)\n', (51776, 51800), False, 'from future.utils import iteritems, itervalues\n'), ((6534, 6579), 'joblib.delayed', 'delayed', (['parallel._get_predictive_likelihoods'], {}), '(parallel._get_predictive_likelihoods)\n', (6541, 6579), False, 'from joblib import Parallel, delayed\n'), ((7688, 7710), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (7707, 7710), False, 'import operator\n'), ((19097, 19136), 'joblib.delayed', 'delayed', (['parallel._get_sampled_stateseq'], {}), '(parallel._get_sampled_stateseq)\n', (19104, 19136), False, 'from joblib import Parallel, delayed\n'), ((19295, 19329), 'pyhsmm.util.general.list_split', 'list_split', (['states_list', 'num_procs'], {}), '(states_list, num_procs)\n', (19305, 19329), False, 'from pyhsmm.util.general import list_split\n'), ((22295, 22323), 'joblib.delayed', 'delayed', (['parallel._get_stats'], {}), '(parallel._get_stats)\n', (22302, 22323), False, 'from joblib import Parallel, delayed\n'), ((22433, 22456), 'pyhsmm.util.general.list_split', 'list_split', (['states_list'], {}), '(states_list)\n', (22443, 22456), False, 'from pyhsmm.util.general import list_split\n'), ((45536, 45557), 'numpy.log', 'np.log', (['scaled_alphal'], {}), '(scaled_alphal)\n', (45542, 45557), True, 'import numpy as np\n'), ((3847, 3863), 
'pybasicbayes.util.stats.atleast_2d', 'atleast_2d', (['data'], {}), '(data)\n', (3857, 3863), False, 'from pybasicbayes.util.stats import atleast_2d\n'), ((5912, 5933), 'numpy.log', 'np.log', (['scaled_alphal'], {}), '(scaled_alphal)\n', (5918, 5933), True, 'import numpy as np\n'), ((16757, 16801), 'numpy.hstack', 'np.hstack', (['(ts[:, None], scalarseq[:, None])'], {}), '((ts[:, None], scalarseq[:, None]))\n', (16766, 16801), True, 'import numpy as np\n')]
|
from aoc_wim.aoc2016 import q16
padding = """\
1 becomes 100.
0 becomes 001.
11111 becomes 11111000000.
111100001010 becomes 1111000010100101011110000.
"""
def test_padding():
for line in padding.splitlines():
left, right = line.rstrip(".").split(" becomes ")
assert q16.pad(left, n=len(right)) == right
def test_checksum():
data = "110010110100"
assert q16.f(data, n=12) == "100"
def test_len20_disk():
data = "10000"
n = 20
assert q16.f(data, n) == "01100"
|
[
"aoc_wim.aoc2016.q16.f"
] |
[((388, 405), 'aoc_wim.aoc2016.q16.f', 'q16.f', (['data'], {'n': '(12)'}), '(data, n=12)\n', (393, 405), False, 'from aoc_wim.aoc2016 import q16\n'), ((481, 495), 'aoc_wim.aoc2016.q16.f', 'q16.f', (['data', 'n'], {}), '(data, n)\n', (486, 495), False, 'from aoc_wim.aoc2016 import q16\n')]
|
from .views import DriverViewSet, VehicleViewSet
from django.urls import include, path
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register('driver', DriverViewSet, basename='driver')
router.register('vehicle', VehicleViewSet, basename='vehicle')
urlpatterns = [
path('', include(router.urls))
]
|
[
"rest_framework.routers.DefaultRouter",
"django.urls.include"
] |
[((147, 162), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (160, 162), False, 'from rest_framework.routers import DefaultRouter\n'), ((315, 335), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (322, 335), False, 'from django.urls import include, path\n')]
|
import os
import tensorflow as tf
import numpy as np
import quaternion
import datetime
import time
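# Small smoke tests for a handful of TensorFlow, numpy-quaternion, datetime and
# casting behaviors; run them all via test() below.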
def test_linspace():
# tf ops must take float variables
# better use np.linspace instead
x = tf.linspace(0., 3., 4)
print("linspace", x)
def test_gather():
coords = tf.tile(tf.expand_dims(tf.linspace(0., 10., 11), 1), (1, 3))
# print(coords)
indices = tf.cast(tf.linspace(0., 10., 6), tf.int32)
extracted = tf.gather(coords, indices)
# print(extracted)
assert (np.isclose(extracted[:, 0].numpy(), indices.numpy()).all())
print("!!! test_gather passed")
def test_pad():
img = tf.ones((4, 5, 3), dtype=tf.float32)
# print("original channel 0", img[:, :, 0])
paddings = tf.constant([[1, 1], [1, 1], [0, 0]])
pad = tf.pad(img, paddings, "CONSTANT")
# print("paddings", paddings)
# print("padded shape:", pad.shape)
print("padded channel 0", pad[:, :, 1])
def test_rotation_vector():
quat = quaternion.from_float_array(np.array([np.cos(np.pi/3), 0, np.sin(np.pi/3), 0]))
print("quaterion angle pi*2/3 about y-axis", quat)
rvec = quaternion.as_rotation_vector(quat)
print("rotation vector:", rvec)
assert (np.isclose(np.linalg.norm(rvec), np.pi*2/3))
print("!!! test_rotation_vector passed")
def test_time():
nowtime = datetime.datetime.now()
print("nowtime", nowtime)
print("formatted time", nowtime.strftime("%m%d_%H%M%S"))
print("asctime", time.asctime())
def test_casting():
data = "1.1"
try:
data = int(data)
except Exception as e:
print(e)
print(type(e))
print(str(e))
def test():
np.set_printoptions(precision=3, suppress=True)
test_linspace()
test_gather()
test_pad()
test_rotation_vector()
test_time()
test_casting()
if __name__ == "__main__":
test()
|
[
"time.asctime",
"tensorflow.ones",
"numpy.set_printoptions",
"tensorflow.linspace",
"tensorflow.gather",
"tensorflow.pad",
"tensorflow.constant",
"numpy.sin",
"numpy.linalg.norm",
"quaternion.as_rotation_vector",
"numpy.cos",
"datetime.datetime.now"
] |
[((206, 230), 'tensorflow.linspace', 'tf.linspace', (['(0.0)', '(3.0)', '(4)'], {}), '(0.0, 3.0, 4)\n', (217, 230), True, 'import tensorflow as tf\n'), ((442, 468), 'tensorflow.gather', 'tf.gather', (['coords', 'indices'], {}), '(coords, indices)\n', (451, 468), True, 'import tensorflow as tf\n'), ((628, 664), 'tensorflow.ones', 'tf.ones', (['(4, 5, 3)'], {'dtype': 'tf.float32'}), '((4, 5, 3), dtype=tf.float32)\n', (635, 664), True, 'import tensorflow as tf\n'), ((728, 765), 'tensorflow.constant', 'tf.constant', (['[[1, 1], [1, 1], [0, 0]]'], {}), '([[1, 1], [1, 1], [0, 0]])\n', (739, 765), True, 'import tensorflow as tf\n'), ((776, 809), 'tensorflow.pad', 'tf.pad', (['img', 'paddings', '"""CONSTANT"""'], {}), "(img, paddings, 'CONSTANT')\n", (782, 809), True, 'import tensorflow as tf\n'), ((1115, 1150), 'quaternion.as_rotation_vector', 'quaternion.as_rotation_vector', (['quat'], {}), '(quat)\n', (1144, 1150), False, 'import quaternion\n'), ((1322, 1345), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1343, 1345), False, 'import datetime\n'), ((1654, 1701), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (1673, 1701), True, 'import numpy as np\n'), ((391, 416), 'tensorflow.linspace', 'tf.linspace', (['(0.0)', '(10.0)', '(6)'], {}), '(0.0, 10.0, 6)\n', (402, 416), True, 'import tensorflow as tf\n'), ((1210, 1230), 'numpy.linalg.norm', 'np.linalg.norm', (['rvec'], {}), '(rvec)\n', (1224, 1230), True, 'import numpy as np\n'), ((1458, 1472), 'time.asctime', 'time.asctime', ([], {}), '()\n', (1470, 1472), False, 'import time\n'), ((311, 337), 'tensorflow.linspace', 'tf.linspace', (['(0.0)', '(10.0)', '(11)'], {}), '(0.0, 10.0, 11)\n', (322, 337), True, 'import tensorflow as tf\n'), ((1007, 1024), 'numpy.cos', 'np.cos', (['(np.pi / 3)'], {}), '(np.pi / 3)\n', (1013, 1024), True, 'import numpy as np\n'), ((1027, 1044), 'numpy.sin', 'np.sin', (['(np.pi / 3)'], {}), '(np.pi / 3)\n', (1033, 1044), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
from nicos import session
from nicos.core import Override, Param, Readable, status
from nicos.core.errors import ConfigurationError
class IEEEDevice(Readable):
"""Special device to put arbitrary device values/parameters into the
BerSANS "IEEE" header fields (to be able to use them during analysis).
"""
parameters = {
'valuename': Param('Device ("dev") or parameter ("dev.param") '
'to return on read', type=str, settable=True,
unit='', category='general'),
}
parameter_overrides = {
'unit': Override(mandatory=False, default='', settable=False,
category='general'),
}
hardware_access = False
def doWriteValuename(self, valuename):
if valuename and '.' not in valuename:
if self._cache:
unit = self._cache.get(valuename, 'unit', '')
else:
unit = getattr(session.getDevice(valuename), 'unit', '')
else:
try:
devname, parname = valuename.rsplit('.', 1)
dev = session.getDevice(devname)
devunit = getattr(dev, 'unit', '')
unit = dev._getParamConfig(parname).unit or ''
if devunit:
unit = unit.replace('main', devunit)
except ConfigurationError:
unit = ''
self._setROParam('unit', unit)
def doStatus(self, maxage=0):
if not self.valuename:
return status.OK, ''
devname = self.valuename.rsplit('.', 1)[0]
if self._cache:
return self._cache.get(devname, 'status')
return session.getDevice(devname).status(maxage)
def doRead(self, maxage=0):
if not self.valuename:
return ''
if '.' in self.valuename:
devname, parname = self.valuename.rsplit('.', 1)
if self._cache:
return self._cache.get(devname, parname)
return getattr(session.getDevice(devname), parname)
if self._cache:
return self._cache.get(self.valuename, 'value')
return session.getDevice(self.valuename).read(maxage)
|
[
"nicos.session.getDevice",
"nicos.core.Override",
"nicos.core.Param"
] |
[((1416, 1543), 'nicos.core.Param', 'Param', (['"""Device ("dev") or parameter ("dev.param") to return on read"""'], {'type': 'str', 'settable': '(True)', 'unit': '""""""', 'category': '"""general"""'}), '(\'Device ("dev") or parameter ("dev.param") to return on read\', type=\n str, settable=True, unit=\'\', category=\'general\')\n', (1421, 1543), False, 'from nicos.core import Override, Param, Readable, status\n'), ((1647, 1720), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)', 'default': '""""""', 'settable': '(False)', 'category': '"""general"""'}), "(mandatory=False, default='', settable=False, category='general')\n", (1655, 1720), False, 'from nicos.core import Override, Param, Readable, status\n'), ((2167, 2193), 'nicos.session.getDevice', 'session.getDevice', (['devname'], {}), '(devname)\n', (2184, 2193), False, 'from nicos import session\n'), ((2741, 2767), 'nicos.session.getDevice', 'session.getDevice', (['devname'], {}), '(devname)\n', (2758, 2767), False, 'from nicos import session\n'), ((3077, 3103), 'nicos.session.getDevice', 'session.getDevice', (['devname'], {}), '(devname)\n', (3094, 3103), False, 'from nicos import session\n'), ((3213, 3246), 'nicos.session.getDevice', 'session.getDevice', (['self.valuename'], {}), '(self.valuename)\n', (3230, 3246), False, 'from nicos import session\n'), ((2012, 2040), 'nicos.session.getDevice', 'session.getDevice', (['valuename'], {}), '(valuename)\n', (2029, 2040), False, 'from nicos import session\n')]
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import logging
import pytest
import requests
from six import iteritems
from datadog_checks.elastic import ESCheck
from datadog_checks.elastic.config import from_instance
from datadog_checks.elastic.metrics import (
CLUSTER_PENDING_TASKS,
STATS_METRICS,
ADDITIONAL_METRICS_1_x,
health_stats_for_version,
index_stats_for_version,
pshard_stats_for_version,
stats_for_version,
)
from .common import CLUSTER_TAG, PASSWORD, URL, USER
log = logging.getLogger('test_elastic')
def test__join_url(elastic_check):
adm_forwarder_joined_url = elastic_check._join_url(
"https://localhost:9444/elasticsearch-admin", "/stats", admin_forwarder=True
)
assert adm_forwarder_joined_url == "https://localhost:9444/elasticsearch-admin/stats"
joined_url = elastic_check._join_url("https://localhost:9444/elasticsearch-admin", "/stats")
assert joined_url == "https://localhost:9444/stats"
def test__get_urls(elastic_check):
health_url, stats_url, pshard_stats_url, pending_tasks_url = elastic_check._get_urls([], True)
assert health_url == '/_cluster/health'
assert stats_url == '/_cluster/nodes/stats?all=true'
assert pshard_stats_url == '/_stats'
assert pending_tasks_url is None
health_url, stats_url, pshard_stats_url, pending_tasks_url = elastic_check._get_urls([], False)
assert health_url == '/_cluster/health'
assert stats_url == '/_cluster/nodes/_local/stats?all=true'
assert pshard_stats_url == '/_stats'
assert pending_tasks_url is None
health_url, stats_url, pshard_stats_url, pending_tasks_url = elastic_check._get_urls([1, 0, 0], True)
assert health_url == '/_cluster/health'
assert stats_url == '/_nodes/stats?all=true'
assert pshard_stats_url == '/_stats'
assert pending_tasks_url == '/_cluster/pending_tasks'
health_url, stats_url, pshard_stats_url, pending_tasks_url = elastic_check._get_urls([1, 0, 0], False)
assert health_url == '/_cluster/health'
assert stats_url == '/_nodes/_local/stats?all=true'
assert pshard_stats_url == '/_stats'
assert pending_tasks_url == '/_cluster/pending_tasks'
health_url, stats_url, pshard_stats_url, pending_tasks_url = elastic_check._get_urls([6, 0, 0], True)
assert health_url == '/_cluster/health'
assert stats_url == '/_nodes/stats'
assert pshard_stats_url == '/_stats'
assert pending_tasks_url == '/_cluster/pending_tasks'
health_url, stats_url, pshard_stats_url, pending_tasks_url = elastic_check._get_urls([6, 0, 0], False)
assert health_url == '/_cluster/health'
assert stats_url == '/_nodes/_local/stats'
assert pshard_stats_url == '/_stats'
assert pending_tasks_url == '/_cluster/pending_tasks'
def test_check(dd_environment, elastic_check, instance, aggregator, cluster_tags, node_tags):
elastic_check.check(instance)
_test_check(elastic_check, instance, aggregator, cluster_tags, node_tags)
def _test_check(elastic_check, instance, aggregator, cluster_tags, node_tags):
config = from_instance(instance)
es_version = elastic_check._get_es_version(config)
# node stats, blacklist metrics that can't be tested in a small, single node instance
blacklist = ['elasticsearch.indices.segments.index_writer_max_memory_in_bytes']
blacklist.extend(ADDITIONAL_METRICS_1_x)
for m_name in stats_for_version(es_version):
if m_name in blacklist:
continue
aggregator.assert_metric(m_name, at_least=1, tags=node_tags)
# cluster stats
expected_metrics = health_stats_for_version(es_version)
expected_metrics.update(CLUSTER_PENDING_TASKS)
for m_name in expected_metrics:
aggregator.assert_metric(m_name, at_least=1, tags=cluster_tags)
aggregator.assert_service_check('elasticsearch.can_connect', status=ESCheck.OK, tags=config.service_check_tags)
# Assert service metadata
# self.assertServiceMetadata(['version'], count=3)
# FIXME: 0.90.13 returns randomly a red status instead of yellow,
# so we don't do a coverage test for it
# Remove me when we stop supporting 0.90.x (not supported anymore by ES)
if es_version != [0, 90, 13]:
# Warning because elasticsearch status should be yellow, according to
# http://chrissimpson.co.uk/elasticsearch-yellow-cluster-status-explained.html
aggregator.assert_service_check('elasticsearch.cluster_health')
def test_node_name_as_host(dd_environment, elastic_check, instance_normalize_hostname, aggregator, node_tags):
elastic_check.check(instance_normalize_hostname)
node_name = node_tags[-1].split(':')[1]
for m_name, _ in iteritems(STATS_METRICS):
aggregator.assert_metric(m_name, count=1, tags=node_tags, hostname=node_name)
def test_pshard_metrics(dd_environment, elastic_check, aggregator):
instance = {'url': URL, 'pshard_stats': True, 'username': USER, 'password': PASSWORD}
config = from_instance(instance)
es_version = elastic_check._get_es_version(config)
elastic_check.check(instance)
pshard_stats_metrics = pshard_stats_for_version(es_version)
for m_name, desc in iteritems(pshard_stats_metrics):
if desc[0] == 'gauge':
aggregator.assert_metric(m_name)
# Our pshard metrics are getting sent, let's check that they're accurate
# Note: please make sure you don't install Maven on the CI for future
# elastic search CI integrations. It would make the line below fail :/
aggregator.assert_metric('elasticsearch.primaries.docs.count')
def test_index_metrics(dd_environment, aggregator, elastic_check, instance, cluster_tags):
instance['index_stats'] = True
config = from_instance(instance)
es_version = elastic_check._get_es_version(config)
if es_version < [1, 0, 0]:
pytest.skip("Index metrics are only tested in version 1.0.0+")
elastic_check.check(instance)
for m_name in index_stats_for_version(es_version):
aggregator.assert_metric(m_name, tags=cluster_tags + ['index_name:testindex'])
def test_health_event(dd_environment, aggregator, elastic_check):
dummy_tags = ['elastique:recherche']
instance = {'url': URL, 'username': USER, 'password': PASSWORD, 'tags': dummy_tags}
config = from_instance(instance)
es_version = elastic_check._get_es_version(config)
# Should be yellow at first
requests.put(URL + '/_settings', data='{"index": {"number_of_replicas": 100}')
elastic_check.check(instance)
if es_version < [2, 0, 0]:
assert len(aggregator.events) == 1
assert sorted(aggregator.events[0]['tags']) == sorted(set(['url:{}'.format(URL)] + dummy_tags + CLUSTER_TAG))
else:
aggregator.assert_service_check('elasticsearch.cluster_health')
def test_metadata(dd_environment, aggregator, elastic_check, instance, version_metadata, datadog_agent):
elastic_check.check_id = 'test:123'
elastic_check.check(instance)
datadog_agent.assert_metadata('test:123', version_metadata)
datadog_agent.assert_metadata_count(len(version_metadata))
@pytest.mark.e2e
def test_e2e(dd_agent_check, elastic_check, instance, cluster_tags, node_tags):
aggregator = dd_agent_check(instance, rate=True)
_test_check(elastic_check, instance, aggregator, cluster_tags, node_tags)
|
[
"datadog_checks.elastic.metrics.index_stats_for_version",
"datadog_checks.elastic.config.from_instance",
"pytest.skip",
"datadog_checks.elastic.metrics.health_stats_for_version",
"datadog_checks.elastic.metrics.stats_for_version",
"requests.put",
"datadog_checks.elastic.metrics.pshard_stats_for_version",
"six.iteritems",
"logging.getLogger"
] |
[((581, 614), 'logging.getLogger', 'logging.getLogger', (['"""test_elastic"""'], {}), "('test_elastic')\n", (598, 614), False, 'import logging\n'), ((3141, 3164), 'datadog_checks.elastic.config.from_instance', 'from_instance', (['instance'], {}), '(instance)\n', (3154, 3164), False, 'from datadog_checks.elastic.config import from_instance\n'), ((3458, 3487), 'datadog_checks.elastic.metrics.stats_for_version', 'stats_for_version', (['es_version'], {}), '(es_version)\n', (3475, 3487), False, 'from datadog_checks.elastic.metrics import CLUSTER_PENDING_TASKS, STATS_METRICS, ADDITIONAL_METRICS_1_x, health_stats_for_version, index_stats_for_version, pshard_stats_for_version, stats_for_version\n'), ((3655, 3691), 'datadog_checks.elastic.metrics.health_stats_for_version', 'health_stats_for_version', (['es_version'], {}), '(es_version)\n', (3679, 3691), False, 'from datadog_checks.elastic.metrics import CLUSTER_PENDING_TASKS, STATS_METRICS, ADDITIONAL_METRICS_1_x, health_stats_for_version, index_stats_for_version, pshard_stats_for_version, stats_for_version\n'), ((4748, 4772), 'six.iteritems', 'iteritems', (['STATS_METRICS'], {}), '(STATS_METRICS)\n', (4757, 4772), False, 'from six import iteritems\n'), ((5033, 5056), 'datadog_checks.elastic.config.from_instance', 'from_instance', (['instance'], {}), '(instance)\n', (5046, 5056), False, 'from datadog_checks.elastic.config import from_instance\n'), ((5175, 5211), 'datadog_checks.elastic.metrics.pshard_stats_for_version', 'pshard_stats_for_version', (['es_version'], {}), '(es_version)\n', (5199, 5211), False, 'from datadog_checks.elastic.metrics import CLUSTER_PENDING_TASKS, STATS_METRICS, ADDITIONAL_METRICS_1_x, health_stats_for_version, index_stats_for_version, pshard_stats_for_version, stats_for_version\n'), ((5236, 5267), 'six.iteritems', 'iteritems', (['pshard_stats_metrics'], {}), '(pshard_stats_metrics)\n', (5245, 5267), False, 'from six import iteritems\n'), ((5780, 5803), 'datadog_checks.elastic.config.from_instance', 'from_instance', (['instance'], {}), '(instance)\n', (5793, 5803), False, 'from datadog_checks.elastic.config import from_instance\n'), ((6014, 6049), 'datadog_checks.elastic.metrics.index_stats_for_version', 'index_stats_for_version', (['es_version'], {}), '(es_version)\n', (6037, 6049), False, 'from datadog_checks.elastic.metrics import CLUSTER_PENDING_TASKS, STATS_METRICS, ADDITIONAL_METRICS_1_x, health_stats_for_version, index_stats_for_version, pshard_stats_for_version, stats_for_version\n'), ((6348, 6371), 'datadog_checks.elastic.config.from_instance', 'from_instance', (['instance'], {}), '(instance)\n', (6361, 6371), False, 'from datadog_checks.elastic.config import from_instance\n'), ((6464, 6542), 'requests.put', 'requests.put', (["(URL + '/_settings')"], {'data': '"""{"index": {"number_of_replicas": 100}"""'}), '(URL + \'/_settings\', data=\'{"index": {"number_of_replicas": 100}\')\n', (6476, 6542), False, 'import requests\n'), ((5898, 5960), 'pytest.skip', 'pytest.skip', (['"""Index metrics are only tested in version 1.0.0+"""'], {}), "('Index metrics are only tested in version 1.0.0+')\n", (5909, 5960), False, 'import pytest\n')]
|
# encoding = utf-8
__author__ = "<NAME>"
import socket
import threading
import datetime
import logging
FORMAT = "%(asctime)s %(threadName)s %(thread)d: %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO)
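# Minimal multi-threaded chat server: one thread accepts connections and one recv
# thread per client broadcasts every received message to all connected clients.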
class WeChat:
def __init__(self, ip="0.0.0.0", port=9000):
self.addr = (ip, port)
        self._sock = socket.socket() # create the listening socket (kept as a private attribute)
        self._sock.bind(self.addr) # bind the address
        self.clients = {} # empty dict used to keep every connected client socket
def start(self):
"""启动程序"""
self._sock.listen() # 启动监听
logging.info("server is listing...")
"""
这个accept线程,用来创建socket,每接入一个连接创建一个socket,多个recv线程
"""
threading.Thread(target=self.accept).start() # 需要启动accept线程,不然会阻塞 no-daemon线程
def accept(self):
while True:
            sock, raddr = self._sock.accept() # accept a connection: a local socket plus the remote (ip, port) tuple
self.clients[raddr] = sock
logging.info("{}:{} client is established".format(*raddr))
            threading.Thread(target=self.recv, args=(sock, raddr)).start() # receive data in its own thread to avoid blocking
    def recv(self, sock: socket.socket, raddr): # takes this connection's socket and the remote (ip, port)
        """Receive data."""
while True:
            data = sock.recv(1024) # receive data
if data == b'':
break
            # wrap the received data and broadcast it back; binary transport needs decoding/encoding
            msg = "{:%Y%m%d %H:%M:%S} [{}:{}] - {} {}".format(datetime.datetime.now(), *raddr, "welcome", data.decode())
print(msg)
for c in self.clients.values():
print(c)
c.send(msg.encode())
def stop(self):
"""关闭程序"""
while True:
cmd = input('>>> ')
if cmd == b'' or cmd == "quit": # 收到空信息或者quit 退出程序
self._sock.close() # 关闭这个连接
print(threading.enumerate())
def main():
a = WeChat()
a.start()
a.stop()
if __name__ == '__main__':
main()
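
# --- Added usage sketch (not part of the original sample) ---
# A minimal client that could exercise the WeChat server defined above.
# The host, port, and message below are illustrative assumptions.
def demo_client(host='127.0.0.1', port=9000, message=b'hello'):
    """Connect to a running server, send one message, print one reply."""
    client = socket.socket()
    client.connect((host, port))
    client.send(message)
    print(client.recv(1024).decode())
    client.close()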
|
[
"threading.Thread",
"logging.basicConfig",
"threading.enumerate",
"socket.socket",
"logging.info",
"datetime.datetime.now"
] |
[((168, 222), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'FORMAT', 'level': 'logging.INFO'}), '(format=FORMAT, level=logging.INFO)\n', (187, 222), False, 'import logging\n'), ((341, 356), 'socket.socket', 'socket.socket', ([], {}), '()\n', (354, 356), False, 'import socket\n'), ((560, 596), 'logging.info', 'logging.info', (['"""server is listing..."""'], {}), "('server is listing...')\n", (572, 596), False, 'import logging\n'), ((686, 722), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.accept'}), '(target=self.accept)\n', (702, 722), False, 'import threading\n'), ((1422, 1445), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1443, 1445), False, 'import datetime\n'), ((1838, 1859), 'threading.enumerate', 'threading.enumerate', ([], {}), '()\n', (1857, 1859), False, 'import threading\n'), ((1020, 1074), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.recv', 'args': '(sock, raddr)'}), '(target=self.recv, args=(sock, raddr))\n', (1036, 1074), False, 'import threading\n')]
|
'''
Placing widgets in table cells
setItem: put text into a cell
setCellWidget: put a widget into a cell
setStyleSheet: set a widget's style (QSS)
'''
import sys
from PyQt5.QtWidgets import (QWidget, QTableWidget, QHBoxLayout, QApplication, QTableWidgetItem, QAbstractItemView,
QComboBox, QPushButton)
class PlaceControlInCell(QWidget):
def __init__(self):
super(PlaceControlInCell,self).__init__()
self.initUI()
def initUI(self):
self.setWindowTitle("在单元格中放置控件")
        self.resize(430, 300)
layout = QHBoxLayout()
tableWidget = QTableWidget()
tableWidget.setRowCount(4)
tableWidget.setColumnCount(3)
layout.addWidget(tableWidget)
tableWidget.setHorizontalHeaderLabels(['姓名','性别','体重(kg)'])
textItem = QTableWidgetItem('小明')
tableWidget.setItem(0,0,textItem)
combox = QComboBox()
combox.addItem('男')
combox.addItem('女')
# QSS Qt StyleSheet
combox.setStyleSheet('QComboBox{margin:3px};')
tableWidget.setCellWidget(0,1,combox)
modifyButton = QPushButton('修改')
modifyButton.setDown(True)
modifyButton.setStyleSheet('QPushButton{margin:3px};')
tableWidget.setCellWidget(0,2,modifyButton)
self.setLayout(layout)
if __name__ == '__main__':
app = QApplication(sys.argv)
example = PlaceControlInCell()
example.show()
sys.exit(app.exec_())
|
[
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtWidgets.QTableWidget",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QTableWidgetItem",
"PyQt5.QtWidgets.QApplication"
] |
[((1312, 1334), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (1324, 1334), False, 'from PyQt5.QtWidgets import QWidget, QTableWidget, QHBoxLayout, QApplication, QTableWidgetItem, QAbstractItemView, QComboBox, QPushButton\n'), ((517, 530), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (528, 530), False, 'from PyQt5.QtWidgets import QWidget, QTableWidget, QHBoxLayout, QApplication, QTableWidgetItem, QAbstractItemView, QComboBox, QPushButton\n'), ((553, 567), 'PyQt5.QtWidgets.QTableWidget', 'QTableWidget', ([], {}), '()\n', (565, 567), False, 'from PyQt5.QtWidgets import QWidget, QTableWidget, QHBoxLayout, QApplication, QTableWidgetItem, QAbstractItemView, QComboBox, QPushButton\n'), ((768, 790), 'PyQt5.QtWidgets.QTableWidgetItem', 'QTableWidgetItem', (['"""小明"""'], {}), "('小明')\n", (784, 790), False, 'from PyQt5.QtWidgets import QWidget, QTableWidget, QHBoxLayout, QApplication, QTableWidgetItem, QAbstractItemView, QComboBox, QPushButton\n'), ((851, 862), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (860, 862), False, 'from PyQt5.QtWidgets import QWidget, QTableWidget, QHBoxLayout, QApplication, QTableWidgetItem, QAbstractItemView, QComboBox, QPushButton\n'), ((1072, 1089), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""修改"""'], {}), "('修改')\n", (1083, 1089), False, 'from PyQt5.QtWidgets import QWidget, QTableWidget, QHBoxLayout, QApplication, QTableWidgetItem, QAbstractItemView, QComboBox, QPushButton\n')]
|
import pandas as pd
import numpy as np
import multiprocessing
from multiprocessing import Process, Manager, Queue
from collections import Counter
import math
from PyProM.src.data.importing import Import
import sys
import os
from PyProM.src.utility.util_profile import Util_Profile
from PyProM.src.utility.util_multiprocessing import Util_Multiprocessing
import time
from functools import wraps
def timefn(fn):
@wraps(fn)
def measure_time(*args, **kwargs):
t1 = time.time()
result = fn(*args, **kwargs)
t2 = time.time()
print("@timefn: {} took {} seconds".format(fn.__name__, t2-t1))
return result
return measure_time
timefn = Util_Profile.timefn
class Eventlog(pd.DataFrame):
"""docstring for Eventlog"""
def __init__(self, *args, **kwargs):
super(Eventlog, self).__init__(*args, **kwargs)
self._columns = []
@property
def _constructor(self):
return Eventlog
@classmethod
def from_xes(cls, path):
_import = Import(path, format='xes')
dict_eventlog = _import.eventlog
if isinstance(dict_eventlog, dict):
print("import dict and produce eventlog")
df = Eventlog.from_dict(dict_eventlog)
return df
@classmethod
def from_txt(cls, path, sep='\t', encoding=None, **kwargs):
if 'dtype' in kwargs:
dtype = kwargs['dtype']
else:
dtype = None
if 'index_col' in kwargs:
index_col = kwargs['index_col']
else:
index_col=False
df = pd.read_csv(path, sep = sep, index_col = index_col, dtype=dtype, encoding=encoding)
return Eventlog(df)
"""
def __call__(self, path, format='xes'):
if format == 'xes':
_import = Import(path, format='xes')
dict_eventlog = _import.eventlog
return self.dict_to_dataframe(dict_eventlog)
if format == 'txt':
return self.csv_to_dataframe(path)
"""
@timefn
def assign_caseid(self, *args):
count = 0
for arg in args:
if count == 0:
self['CASE_ID'] = self[arg].apply(str)
else:
self['CASE_ID'] += '_' + self[arg].apply(str)
#del self[arg]
count +=1
self._columns.append('CASE_ID')
return self
@timefn
def assign_activity(self, *args):
count = 0
for arg in args:
if count == 0:
self['Activity'] = self[arg].apply(str)
else:
self['Activity'] += '_' + self[arg].apply(str)
#del self[arg]
count +=1
self._columns.append('Activity')
return self
@timefn
def assign_resource(self, *args):
count = 0
for arg in args:
if count == 0:
self['Resource'] = self[arg].astype(str)
else:
self['Resource'] += '_' + self[arg].astype(str)
#del self[arg]
count +=1
self._columns.append('Resource')
return self
@timefn
def assign_timestamp(self, name, new_name = 'TIMESTAMP', _format = '%Y/%m/%d %H:%M:%S', errors='ignore'):
print(_format)
self[name] = pd.to_datetime(self[name], format = _format, errors=errors)
self.rename(columns={name: new_name}, inplace=True)
#self.loc[pd.isna(self[name]),name] = '-'
self._columns.append(new_name)
return self
def assign_attr(self, **kwargs):
"""
이 함수는, ~~~~다.
#할일: 컬럼명만 바꾸는 것으로!
:param kwargs: old_col=데이터에 포함된 컬럼명, new_col=생성한 이벤트로그에 지정할 컬럼명
:return: 이벤트로그
"""
if 'old_col' in kwargs:
old_col = kwargs['old_col']
if 'new_col' in kwargs:
new_col = kwargs['new_col']
else:
new_col = kwargs['old_col']
self[new_col] = self[old_col]
self._columns.append(new_col)
del self[old_col]
self._columns.append(new_col)
return self
def assign_cluster(self, *args):
count = 0
for arg in args:
if count == 0:
self['Cluster'] = self[arg].astype(str)
else:
self['Cluster'] += '_' + self[arg].astype(str)
#del self[arg]
count +=1
self._columns.append('Cluster')
return self
def sort(self, by=['CASE_ID']):
self = self.sort_values(by)
return self
def clear_columns(self, *args, **kwargs):
if 'extra' in kwargs:
extra = kwargs['extra']
else:
extra = []
self = self[self._columns]
return self
def join_columns(self, col_name, *args):
if len(args) < 2:
print("join_columns requires at least 2 columns")
count = 0
tmp = self.copy(deep=True)
for arg in args:
if count == 0:
self[col_name] = tmp[arg].astype(str)
else:
self[col_name] += '/' + tmp[arg].astype(str)
#del self[arg]
count +=1
return self
"""
utility functions
"""
def get_event_trace(self, workers, value = 'Activity'):
output = self.parallelize(self._get_event_trace, workers, value)
event_trace = Util_Multiprocessing.join_dict(output)
return event_trace
def _get_event_trace(self, eventlog, x, value='Activity'):
event_trace = dict()
count = 0
for instance in eventlog.itertuples():
index = instance.Index
if value == 'Activity':
ai = eventlog.get_activity_by_index(index)
elif value == 'Resource':
ai = eventlog.get_resource_by_index(index)
elif value == 'TIMESTAMP':
ai = eventlog.get_timestamp_by_index(index)
else:
ai = eventlog.get_col_value_by_index(value, index)
if index == 0:
event_trace[instance.CASE_ID] = [ai]
continue
caseid = eventlog.get_caseid_by_index(index-1)
if instance.CASE_ID == caseid:
event_trace[instance.CASE_ID].append(ai)
else:
event_trace[instance.CASE_ID] = [ai]
print("Finish")
x.append(event_trace)
def _get_trace_count(self, event_trace):
trace_count = dict()
traces = event_trace.values()
for trace in traces:
trace = tuple(trace)
if trace not in trace_count:
trace_count[trace] = 0
trace_count[trace] += 1
return trace_count
def get_caseids(self):
unique_caseids = self['CASE_ID'].unique()
return unique_caseids
def get_activities(self):
unique_activities = self['Activity'].unique()
return unique_activities
def get_resources(self):
unique_resources = self['Resource'].unique()
return unique_resources
def get_timestamps(self):
unique_timestamps = self['TIMESTAMP'].unique()
return unique_timestamps
    # return the unique values of the given column as a list
def get_col_values(self,col):
return list(set(self[col]))
def get_first_caseid(self):
return self['CASE_ID'][0]
def get_caseid_by_index(self,index):
return self['CASE_ID'][index]
def get_resource_by_index(self, index):
return self['Resource'][index]
def get_activity_by_index(self, index):
return self['Activity'][index]
def get_timestamp_by_index(self, index):
return self['TIMESTAMP'][index]
def get_col_value_by_index(self, col, index):
return self[col][index]
    # return the rows whose given column equals the given value
def get_col_value(self, col, value):
value_df = self.loc[self[col]==value]
value_df.name = value
return value_df
def change_col_value(self, col, old_val, new_val):
self.loc[self[col]==old_val, col] = new_val
return self
def col_val_to_numeric(self, col):
"""
To make a chart using bokeh, x values and y values must be numeric.
Accordingly, change column values to numeric so that it can be properly drawn by bokeh
Key arguements
col -- column to be converted to numeric
"""
self.sort_values(by=col, inplace=True)
self.reset_index(drop=True, inplace=True)
indexs = []
i=1
for index, instance in self.iterrows():
if index==0:
indexs.append(i)
continue
value = self[col][index-1]
if instance[col] != value:
i+=1
indexs.append(i)
self.loc[:, 'new_col'] = indexs
return self
def filter(self, criterion, value):
return self.loc[self[criterion] == value, :]
    # drop the rows whose given column equals the given value
def remove_col_value(self, col, value):
return self.loc[self[col] != value]
    # return the total number of events in the event log
def count_event(self):
return len(self.index)
    # return the number of cases in the event log
def count_case(self):
return len(set(self['CASE_ID']))
    # return the number of unique values in the given column
def count_col_values(self, col):
return len(set(self[col]))
    # print the number of unique values of every column
def show_col_counts(self):
columns = self.columns
for col in columns:
print("unique counts of {}: {}".format(col,len(set(self[col]))))
def count_col_case(self, col):
col_case = self.groupby(col).CASE_ID.apply(list).apply(set)
col_case_count = col_case.apply(len)
col_case_count_mean = np.mean(col_case_count)
col_case_count_std = np.std(col_case_count)
print("CLUSTER count: {}".format(col_case_count))
print("CLUSTER count mean: {}".format(col_case_count_mean))
print("CLUSTER count std: {}".format(col_case_count_std))
return col_case_count
def count_duplicate_values(self, eventlog, **kwargs):
"""특정 값이 중복되는 경우 중복횟수의 빈도를 return함
e.g. 1번 중복: 100, 2번 중복: 300
Keyword arguments:
col -- 특정 col이 중복된 것을 확인하고 싶은 경우 (default: Activity)
"""
if 'col' in kwargs:
col = kwargs['col']
traces = eventlog.get_event_trace(workers=4, value=col)
else:
traces = eventlog.get_event_trace(workers=4, value='Activity')
count=0
inv_act_counts = []
for t in traces:
act_count = dict(Counter(traces[t]))
inv_act_count = dict()
for k,v in act_count.items():
if v < 2:
continue
if v in inv_act_count:
inv_act_count[v].append(k)
else:
inv_act_count[v] = [k]
inv_act_counts.append(inv_act_count)
count_result_step = dict()
for inv_act_count in inv_act_counts:
for k in inv_act_count:
if k not in count_result_step:
count_result_step[k] = 1
else:
count_result_step[k] += 1
result = pd.DataFrame(list(count_result_step.items()), columns=['repetition', 'count'])
return result
def count_loops(self, eventlog, **kwargs):
"""step이 연속된 경우를 count함. Step1-->Step1인 경우 1, Step1-->Step1-->Step1인 경우 2, 동시에 동일 device에서 수행되었는지도 계산함
Keyword arguments:
col -- 특정 col이 중복된 것을 확인하고 싶은 경우 (default: Activity)
value -- 특정 값이 연속된 것을 확인하고 싶은 경우 e.g. 'Null'
"""
if 'col' in kwargs:
col = kwargs['col']
traces = eventlog.get_event_trace(workers=4, value=col)
else:
traces = eventlog.get_event_trace(workers=4, value='Activity')
count=0
if 'value' in kwargs:
value = kwargs['value']
else:
value = 'default'
        for t in traces:
            for index, act in enumerate(traces[t]):
                if index == len(traces[t]) - 1:
                    continue
                if value == 'default':
                    # no specific value given: count any pair of identical consecutive steps
                    if traces[t][index+1] == act:
                        count += 1
                else:
                    if act == value and traces[t][index+1] == value:
                        count += 1
print("count_consecutives: {}".format(count))
return count
def describe(self):
print("# events: {}".format(len(self)))
print("# cases: {}".format(len(set(self['CASE_ID']))))
print("# activities: {}".format(len(set(self['Activity']))))
print("# resources: {}".format(len(set(self['Resource']))))
try:
print("average yield: {}".format(np.mean(self['VALUE'])))
        except (KeyError, AttributeError):
print("yield not exists")
def split_on_case(self, split):
caseid = self.get_caseids()
sub_cases = []
for d in np.array_split(caseid, split):
sub_cases.append(d)
sub_logs = []
for i in range(len(sub_cases)):
sub_log = self.loc[self['CASE_ID'].isin(sub_cases[i]), :]
sub_log.reset_index(drop=True, inplace=True)
sub_logs.append(sub_log)
return sub_logs
def parallelize(self, func, workers=multiprocessing.cpu_count(), *args):
sublogs = self.split_on_case(workers)
output = Queue()
manager = Manager()
output = manager.list()
# Setup a list of processes that we want to run
processes = [Process(target=func, args=(sublogs[i], output)+args) for i in range(len(sublogs))]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
return output
    # Relation dictionary (key: after-activity, value: list of pre-activities)
##You need to specify the objective of this function
##Additionally, please try to make the code below more efficient. (Both in terms of performance and visibility)
def relation_dictionary(self, pre_col, aft_col):
relation_set = {}
        aft_activity_list = self.get_col_values(aft_col)
for i in aft_activity_list:
relation_set[i] = []
for i in range(len(self)):
relation_set[self[aft_col][i]].append(self[pre_col][i])
return relation_set
if __name__ == '__main__':
"""
eventlog = Eventlog.from_xes('./example/running_example.xes')
print(type(eventlog))
"""
eventlog = Eventlog.from_txt('/Users/GYUNAM/Desktop/LAB/SAMSUNG_PROJECT/IMPLE/input/Sample_data.txt')
eventlog = eventlog.assign_caseid('ROOT_LOT_ID', 'WAFER_ID')
eventlog = eventlog.assign_timestamp('TKIN_TIME', 'TKOUT_TIME')
print(eventlog)
|
[
"pandas.read_csv",
"numpy.std",
"multiprocessing.Manager",
"time.time",
"PyProM.src.utility.util_multiprocessing.Util_Multiprocessing.join_dict",
"numpy.mean",
"pandas.to_datetime",
"PyProM.src.data.importing.Import",
"functools.wraps",
"numpy.array_split",
"multiprocessing.Queue",
"multiprocessing.Process",
"multiprocessing.cpu_count"
] |
[((383, 392), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (388, 392), False, 'from functools import wraps\n'), ((436, 447), 'time.time', 'time.time', ([], {}), '()\n', (445, 447), False, 'import time\n'), ((486, 497), 'time.time', 'time.time', ([], {}), '()\n', (495, 497), False, 'import time\n'), ((910, 936), 'PyProM.src.data.importing.Import', 'Import', (['path'], {'format': '"""xes"""'}), "(path, format='xes')\n", (916, 936), False, 'from PyProM.src.data.importing import Import\n'), ((1359, 1438), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': 'sep', 'index_col': 'index_col', 'dtype': 'dtype', 'encoding': 'encoding'}), '(path, sep=sep, index_col=index_col, dtype=dtype, encoding=encoding)\n', (1370, 1438), True, 'import pandas as pd\n'), ((2703, 2760), 'pandas.to_datetime', 'pd.to_datetime', (['self[name]'], {'format': '_format', 'errors': 'errors'}), '(self[name], format=_format, errors=errors)\n', (2717, 2760), True, 'import pandas as pd\n'), ((4375, 4413), 'PyProM.src.utility.util_multiprocessing.Util_Multiprocessing.join_dict', 'Util_Multiprocessing.join_dict', (['output'], {}), '(output)\n', (4405, 4413), False, 'from PyProM.src.utility.util_multiprocessing import Util_Multiprocessing\n'), ((8022, 8045), 'numpy.mean', 'np.mean', (['col_case_count'], {}), '(col_case_count)\n', (8029, 8045), True, 'import numpy as np\n'), ((8069, 8091), 'numpy.std', 'np.std', (['col_case_count'], {}), '(col_case_count)\n', (8075, 8091), True, 'import numpy as np\n'), ((10629, 10658), 'numpy.array_split', 'np.array_split', (['caseid', 'split'], {}), '(caseid, split)\n', (10643, 10658), True, 'import numpy as np\n'), ((10926, 10953), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (10951, 10953), False, 'import multiprocessing\n'), ((11014, 11021), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (11019, 11021), False, 'from multiprocessing import Process, Manager, Queue\n'), ((11034, 11043), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (11041, 11043), False, 'from multiprocessing import Process, Manager, Queue\n'), ((11135, 11189), 'multiprocessing.Process', 'Process', ([], {'target': 'func', 'args': '((sublogs[i], output) + args)'}), '(target=func, args=(sublogs[i], output) + args)\n', (11142, 11189), False, 'from multiprocessing import Process, Manager, Queue\n'), ((10458, 10480), 'numpy.mean', 'np.mean', (["self['VALUE']"], {}), "(self['VALUE'])\n", (10465, 10480), True, 'import numpy as np\n')]
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_ECHO = True
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_DATABASE_URI = "postgresql://postgres:password@postgres:5432/case_service"
#SQLALCHEMY_DATABASE_URI = "sqlite:///db.sqlite3"
|
[
"os.path.dirname"
] |
[((37, 62), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (52, 62), False, 'import os\n')]
|
"""strategypattern_traditional
Example to show one way of implementing strategy design pattern in Python.
The example shown here resembles a 'traditional' implementation of strategy
pattern in Python (traditional = the one you may implement in languages like
C++). For a more Pythonic approach, see the file strategypattern_pythonic.py.
This example is created to illustrate a design pattern discussed in the book
Learning Python Application Development (Packt Publishing). See the book for
further details.
This module is compatible with Python 2.7.9. It contains
supporting code for the book, Learning Python Application Development,
Packt Publishing.
RUNNING THE PROGRAM:
Assuming you have python in your environment variable PATH, type the following
in the command prompt to run the program:
python name_of_the_file.py
(Replace name_of_the_file.py with the name of this file)
.. todo:: You will find several classes put inside this module. This is done
just for the ease of illustration. As an exercise, you can put each
class in its own module. If you are using a Python IDE, you could also
use refactoring feature of the IDE (if available) for this work.
:copyright: 2016, <NAME>
:license: The MIT License (MIT) . See LICENSE file for further details.
"""
from __future__ import print_function
import sys
from abc import ABCMeta, abstractmethod
# Make sure we are 'not' using Python 3. (should really be Python 2.7.9)
if sys.version_info >= (3, 0):
print("This code requires Python 2.7.9 ")
print("Looks like you are trying to run this using "
"Python version: %d.%d " % (sys.version_info[0], sys.version_info[1]))
print("Exiting...")
sys.exit(1)
class AbstractGameUnit:
"""Base class for all the game characters.
:arg string name: Name of the game character.
:arg JumpStrategy jump_object: Could be an instance of JumpStrategy or
its subclasses.(or None if unspecified)
:ivar jump_strategy: Choose the algorithm for jumping.
"""
__metaclass__ = ABCMeta
def __init__(self, name, jump_object=None):
self.jump_strategy = None
self.name = name
self.set_jump_strategy(jump_object)
def set_jump_strategy(self, jump_object=None):
"""Set up the object that defines the jump strategy.
Choose an algorithm that defines the jump behavior. The algorithm is
represented by a 'strategy object'.
:arg JumpStrategy jump_object: Instance of the class that should handle
how this game unit 'jumps' . Could be instance of
JumpStrategy or its subclasses (or None if unspecified)
"""
if isinstance(jump_object, JumpStrategy):
self.jump_strategy = jump_object
else:
self.jump_strategy = JumpStrategy()
# print("\tset_jump_strategy: self.jump_strategy:",
# type(self.jump_strategy).__name__)
def jump(self):
"""Perform jump operation (delegated)"""
try:
self.jump_strategy.jump()
except AttributeError as e:
print("Error: AbstractGameUnit.jump: self.jump_strategy: {} "
"\nError details: {} ".format(self.jump_strategy, e.args))
@abstractmethod
def info(self):
""""Print information about this game unit."""
pass
class DwarfFighter(AbstractGameUnit):
"""Create a DwarfFighter instance"""
def info(self):
"""Print info about thus unit, overrides superclass method."""
print("I am a great dwarf of the eastern foo mountain!")
class JumpStrategy:
"""Base Class representing a jump strategy (an algorithm)."""
def jump(self):
"""The actual jump algorithm.
.. seealso: AbstractGameUnit.jump() where this is called
(if this jump strategy is chosen)
"""
print("--> JumpStrategy.jump: Default jump")
class CanNotJump(JumpStrategy):
"""Class whose instance represents a jump algorithm."""
def jump(self):
"""The actual jump algorithm.
.. seealso: AbstractGameUnit.jump() where this is called
(if this jump strategy is chosen)
"""
print("--> CanNotJump.jump: I can not jump")
class HorseJump(JumpStrategy):
"""Class whose instance represents a jump algorithm."""
def jump(self):
"""The actual jump algorithm.
.. seealso: AbstractGameUnit.jump() where this is called
(if this jump strategy is chosen)
"""
print("--> HorseJump.jump: Jumping my horse.")
class PowerJump(JumpStrategy):
"""Class whose instance represents a jump algorithm."""
def jump(self):
"""The actual jump algorithm.
.. seealso: AbstractGameUnit.jump() where this is called
(if this jump strategy is chosen)
"""
print("--> PowerJump.jump: I can jump 100 feet from the ground!")
if __name__ == '__main__':
# Create a jump strategy instance (algorithm representing a jump behavior)
jump_strategy = CanNotJump()
# Pass it to the DwarfFighter.
dwarf = DwarfFighter("Dwarf", jump_strategy)
print("\n{STRATEGY-I} Dwarf trying to jump:")
# The dwarf instance will use the jump strategy represented by CanNotJump()
dwarf.jump()
print("-"*56)
# Optionally change the jump strategy later
print("\n{STRATEGY-II} Dwarf given a 'magic potion' to jump:")
dwarf.set_jump_strategy(PowerJump())
dwarf.jump()
print("-"*56)
|
[
"sys.exit"
] |
[((1729, 1740), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1737, 1740), False, 'import sys\n')]
|
import conllu
from sklearn.metrics import confusion_matrix
gold = conllu.parse(open('cs-ud-test.conllu', 'r').read())
pred = conllu.parse(open('cs-ud-test-pred.conllu', 'r').read())
gold_labels = [t['deprel'] for sent in gold for t in sent]
pred_labels = [t['deprel'] for sent in pred for t in sent]
vocab = sorted(list(set(gold_labels + pred_labels)))
print("\t".join(['_'] + vocab))
for i,line in enumerate(confusion_matrix(gold_labels, pred_labels, labels=vocab)):
print("\t".join([vocab[i]] + [n if int(n) > 0 else "" for n in list(map(str, list(line)))]))
|
[
"sklearn.metrics.confusion_matrix"
] |
[((412, 468), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['gold_labels', 'pred_labels'], {'labels': 'vocab'}), '(gold_labels, pred_labels, labels=vocab)\n', (428, 468), False, 'from sklearn.metrics import confusion_matrix\n')]
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
from popart.ir.tensor import Tensor
from popart.ir.graph import Graph
import popart._internal.ir as _ir
from popart.ir import dtypes
from typing import Optional
def cast_if_needed(t: Tensor, data_type: dtypes.dtype) -> Tensor:
from popart.ir.ops.cast import cast
if t.dtype != data_type:
return cast(t, data_type)
return t
def check_in_graph(graph: Graph, *tensors: Tensor):
"""Checks if tensors are in graph. If not, raises a ValueError."""
for tensor in tensors:
if tensor not in graph:
raise ValueError(
f"Tensor {tensor.name} is not in the current Graph {graph.name}."
)
def convert_optional_float(v: Optional[float]):
return _ir.OptionalFloat(v) if v is not None else _ir.OptionalFloat()
|
[
"popart._internal.ir.OptionalFloat",
"popart.ir.ops.cast.cast"
] |
[((371, 389), 'popart.ir.ops.cast.cast', 'cast', (['t', 'data_type'], {}), '(t, data_type)\n', (375, 389), False, 'from popart.ir.ops.cast import cast\n'), ((774, 794), 'popart._internal.ir.OptionalFloat', '_ir.OptionalFloat', (['v'], {}), '(v)\n', (791, 794), True, 'import popart._internal.ir as _ir\n'), ((817, 836), 'popart._internal.ir.OptionalFloat', '_ir.OptionalFloat', ([], {}), '()\n', (834, 836), True, 'import popart._internal.ir as _ir\n')]
|
from django.core.exceptions import ValidationError
'''
RowHandler objects
An object inheriting from GenericRowHandler() should be created for each different 'type' of row
(the 'type' being determined by a unique combination of columns); a sketch of such a subclass is shown at the end of this module.
process_row (input):
row data obtained from the Importer objects.
get_result (output):
A row result dictionary containing errors, validation error, warnings, and row data
'''
class GenericRowHandler():
def __init__(self):
self.errors = {}
self.warnings = {}
# optional - in case the Importer needs the current row main object from the RowHandler
self.row_object = None
def process_row(self, **kwargs):
if self.errors == {}:
self.process_row_inner(**kwargs)
result = self.get_result()
return result
def get_result(self):
warnings = []
for (k, v) in (self.warnings).items():
if v:
warnings.append(f"{k} : {v}")
return {'errors': [], 'validation_error': ValidationError(self.errors), 'warnings': warnings}
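
# --- Added sketch (not part of the original module) ---
# GenericRowHandler.process_row() delegates to process_row_inner(), which each
# concrete row handler is expected to define, filling self.errors and
# self.warnings along the way. A minimal hypothetical subclass (field names
# are purely illustrative):
class ExampleRowHandler(GenericRowHandler):
    def process_row_inner(self, **kwargs):
        name = kwargs.get('name')
        if not name:
            self.errors['name'] = ['Missing value in the name column.']
        if kwargs.get('volume', 0) < 0:
            self.warnings['volume'] = 'Negative volume; keeping the value as-is.'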
|
[
"django.core.exceptions.ValidationError"
] |
[((1062, 1090), 'django.core.exceptions.ValidationError', 'ValidationError', (['self.errors'], {}), '(self.errors)\n', (1077, 1090), False, 'from django.core.exceptions import ValidationError\n')]
|
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.postgres.search import SearchQuery
from django.db.models import Count
from django.shortcuts import render
from auth.helpers import auth_required
from common.models import group_by, top
from common.pagination import paginate
from users.models.expertise import UserExpertise
from users.models.tags import Tag
from users.models.user import User
@auth_required
def people(request):
users = User.registered_members().order_by("-created_at").select_related("geo")
query = request.GET.get("query")
if query:
users = users.filter(index__index=SearchQuery(query, config="russian"))
tags = request.GET.getlist("tags")
if tags:
users = users.filter(index__tags__contains=tags)
country = request.GET.get("country")
if country:
users = users.filter(country=country)
filters = request.GET.getlist("filters")
if filters:
if "faang" in filters:
users = users.filter(company__in=[
"Facebook", "Apple", "Google", "Amazon", "Netflix", "Microsoft",
"Фейсбук", "Гугл", "Амазон", "Нетфликс", "Майкрософт", "Микрософт"
])
if "same_city" in filters:
users = users.filter(city=request.me.city)
if "activity" in filters:
users = users.filter(last_activity_at__gte=datetime.utcnow() - timedelta(days=30))
tags_with_stats = Tag.tags_with_stats()
tag_stat_groups = group_by(tags_with_stats, "group", todict=True)
tag_stat_groups.update({
"travel": [tag for tag in tag_stat_groups.get(Tag.GROUP_CLUB, []) if tag.code in {
"can_coffee", "can_city", "can_beer", "can_office", "can_sleep",
}],
"grow": [tag for tag in tag_stat_groups.get(Tag.GROUP_CLUB, []) if tag.code in {
"can_advice", "can_project", "can_teach", "search_idea",
"can_idea", "can_invest", "search_mentor", "can_mentor", "can_hobby"
}],
"work": [tag for tag in tag_stat_groups.get(Tag.GROUP_CLUB, []) if tag.code in {
"can_refer", "search_employees", "search_job", "search_remote", "search_relocate"
}],
"money": tag_stat_groups.get(Tag.GROUP_MONEY, []),
})
active_countries = User.registered_members().filter(country__isnull=False)\
.values("country")\
.annotate(country_count=Count("country"))\
.order_by("-country_count")
users_total = users.count()
if users_total < 200:
user_expertise = UserExpertise.objects.filter(user_id__in=[u.id for u in users])
else:
user_expertise = UserExpertise.objects.all()
map_stat_groups = {
"💼 Топ компаний": top(users, "company", skip={"-"})[:5],
"🏰 Города": top(users, "city")[:5],
# "🎬 Экспертиза": top(user_expertise, "name")[:5],
}
return render(request, "users/people.html", {
"people_query": {
"query": query,
"country": country,
"tags": tags,
"filters": filters,
},
"users": users,
"users_total": users_total,
"users_paginated": paginate(request, users, page_size=settings.PEOPLE_PAGE_SIZE),
"tag_stat_groups": tag_stat_groups,
"max_tag_user_count": max(tag.user_count for tag in tags_with_stats),
"active_countries": active_countries,
"map_stat_groups": map_stat_groups,
})
|
[
"users.models.expertise.UserExpertise.objects.filter",
"django.contrib.postgres.search.SearchQuery",
"users.models.user.User.registered_members",
"users.models.tags.Tag.tags_with_stats",
"common.pagination.paginate",
"users.models.expertise.UserExpertise.objects.all",
"datetime.datetime.utcnow",
"common.models.top",
"datetime.timedelta",
"django.db.models.Count",
"common.models.group_by"
] |
[((1469, 1490), 'users.models.tags.Tag.tags_with_stats', 'Tag.tags_with_stats', ([], {}), '()\n', (1488, 1490), False, 'from users.models.tags import Tag\n'), ((1513, 1560), 'common.models.group_by', 'group_by', (['tags_with_stats', '"""group"""'], {'todict': '(True)'}), "(tags_with_stats, 'group', todict=True)\n", (1521, 1560), False, 'from common.models import group_by, top\n'), ((2563, 2626), 'users.models.expertise.UserExpertise.objects.filter', 'UserExpertise.objects.filter', ([], {'user_id__in': '[u.id for u in users]'}), '(user_id__in=[u.id for u in users])\n', (2591, 2626), False, 'from users.models.expertise import UserExpertise\n'), ((2662, 2689), 'users.models.expertise.UserExpertise.objects.all', 'UserExpertise.objects.all', ([], {}), '()\n', (2687, 2689), False, 'from users.models.expertise import UserExpertise\n'), ((2755, 2788), 'common.models.top', 'top', (['users', '"""company"""'], {'skip': "{'-'}"}), "(users, 'company', skip={'-'})\n", (2758, 2788), False, 'from common.models import group_by, top\n'), ((2809, 2827), 'common.models.top', 'top', (['users', '"""city"""'], {}), "(users, 'city')\n", (2812, 2827), False, 'from common.models import group_by, top\n'), ((3182, 3243), 'common.pagination.paginate', 'paginate', (['request', 'users'], {'page_size': 'settings.PEOPLE_PAGE_SIZE'}), '(request, users, page_size=settings.PEOPLE_PAGE_SIZE)\n', (3190, 3243), False, 'from common.pagination import paginate\n'), ((654, 690), 'django.contrib.postgres.search.SearchQuery', 'SearchQuery', (['query'], {'config': '"""russian"""'}), "(query, config='russian')\n", (665, 690), False, 'from django.contrib.postgres.search import SearchQuery\n'), ((488, 513), 'users.models.user.User.registered_members', 'User.registered_members', ([], {}), '()\n', (511, 513), False, 'from users.models.user import User\n'), ((2423, 2439), 'django.db.models.Count', 'Count', (['"""country"""'], {}), "('country')\n", (2428, 2439), False, 'from django.db.models import Count\n'), ((1406, 1423), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1421, 1423), False, 'from datetime import datetime, timedelta\n'), ((1426, 1444), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (1435, 1444), False, 'from datetime import datetime, timedelta\n'), ((2306, 2331), 'users.models.user.User.registered_members', 'User.registered_members', ([], {}), '()\n', (2329, 2331), False, 'from users.models.user import User\n')]
|
import requests
import xmljson
import xml.etree.ElementTree as elemTree
from json import dump, load
import os
import jsonbender
import json
from collections import OrderedDict
from jsonschema.validators import RefResolver, Draft4Validator
from validate.jsonschema_validator import validate_instance
class FlowRepoClient:
"""
A class that provides functionality to download experiments from the
FlowRepository (https://flowrepository.org/), transform the XML into JSON
and validate the JSON instances against their JSON schema.
The transformation from XML to JSON relies on the
JSONBender library (https://github.com/Onyo/jsonbender).
"""
def __init__(self, mapping, client_id, number_of_items):
""" The class constructor
:param mapping: the mapping dictionary containing the jsonbender objects
(see https://github.com/Onyo/jsonbender)
:type mapping: file
:param client_id: identifier of the client
:type client_id: str
"""
self.errors = {}
self.instances = []
self.main_context_url = ""
self.clientID = client_id
self.item_number = number_of_items
self.bender_mapping_file = mapping
self.mapping_url = "https://fairsharing.github.io/mircat/miflowcyt/" \
"schema_context_mapping.json"
self.base_schema = "experiment_schema.json"
self.schema_url = "https://w3id.org/mircat/miflowcyt/schema/" + self.base_schema
def get_user_content_id(self):
""" Return all IDs found in the user content XML
:return: a list of all IDs there were identified in the variable returned by the API
"""
full_url = "http://flowrepository.org/list?client=" + self.clientID
ids = []
response = requests.request("GET", full_url)
if response.status_code == 404:
return Exception("Verify your client ID (" + self.clientID + ")")
else:
user_data = xmljson.parker.data((elemTree.fromstring(response.text)))
if "public-experiments" in user_data.keys() \
and "experiment" in user_data["public-experiments"].keys():
for experiment in user_data['public-experiments']['experiment']:
ids.append(experiment['id'])
return ids
def grab_experiment_from_api(self, item_identifier):
""" Retrieve the experimental metadata and return it as XML document object
:param item_identifier: the item identifier that should be retrieved
:return: the XML document object
"""
full_url = "http://flowrepository.org/list/" \
+ item_identifier \
+ "?client=" \
+ self.clientID
response = requests.request("GET", full_url)
if response.status_code == 404 or response.status_code == 400:
return Exception("Item %s could not be found" % item_identifier)
return response.text
def get_all_experiments(self, max_number, accessible_ids):
""" Grab all experiments from the API for the given number
:param max_number: the number of item to retrieve
:type max_number: int
:param accessible_ids: the ids that this use can fetch
:type accessible_ids: list
:return: the experiments XMLs
"""
contents = {}
        # fetch at most max_number experiments (all of them if fewer are accessible)
        for i in range(min(max_number, len(accessible_ids))):
            current_content = self.grab_experiment_from_api(accessible_ids[i])
            contents[accessible_ids[i]] = current_content
return contents
@staticmethod
def validate_instance_from_file(instance, item_id, schema_name):
""" Method to output the extracted JSON into a file and validate it against the given schema
:param instance: the instance to output into a file
:param item_id: the instance ID needed to create the file name
:param schema_name: the schema to check against
:return errors: a list of fields that have an error for this instance
"""
try:
file_name = item_id + '.json'
file_full_path = os.path.join(os.path.dirname(__file__),
"../tests/data/MiFlowCyt/" + file_name)
with open(file_full_path, 'w') as outfile:
dump(instance, outfile, indent=4)
outfile.close()
errors = validate_instance(
os.path.join(os.path.dirname(__file__), "../tests/data/MiFlowCyt/"),
schema_name,
os.path.join(os.path.dirname(__file__), "../tests/data/MiFlowCyt/"),
file_name,
1,
{})
return errors
except FileNotFoundError:
return Exception("Please provide a valid schema")
@staticmethod
def get_mapping(mapping_file_name):
""" Build the mapping dictionary based on the given mapping file
:param mapping_file_name: the name of the mapping file
:return mapping: the mapping of the fields
"""
try:
mapping = {}
with open(mapping_file_name) as mapping_var:
raw_mapping = load(mapping_var)
mapping_var.close()
# For each mapped field in the mapping file
for mapped_item in raw_mapping:
# if the value of the field is a string
if isinstance(raw_mapping[mapped_item], str):
mapping[mapped_item] = jsonbender.OptionalS(raw_mapping[mapped_item])
# if the value of the field is an object
elif isinstance(raw_mapping[mapped_item], object):
# Raise an error if a value/option is missing
if 'value' not in raw_mapping[mapped_item] or \
('benderOption' not in raw_mapping[mapped_item]):
raise Exception("The mapping file is missing a value or the bender option "
"for " + mapped_item)
else:
if raw_mapping[mapped_item]['benderOption'] == "default":
mapping[mapped_item] = \
jsonbender.OptionalS(raw_mapping[mapped_item]['value'])
elif raw_mapping[mapped_item]['benderOption'] == "raiseErrors":
mapping[mapped_item] = jsonbender.S(raw_mapping[mapped_item]['value'])
elif raw_mapping[mapped_item]['benderOption'] == "simple":
mapping[mapped_item] = jsonbender.K(raw_mapping[mapped_item]['value'])
elif raw_mapping[mapped_item]['benderOption'] == "inject":
mapping[mapped_item] = jsonbender.F(raw_mapping[mapped_item]['value'])
return mapping
except FileNotFoundError:
return Exception("Mapping file wasn't found")
def inject_context(self):
"""
Transform the myflowcyt JSON into a JSON-LD by injecting @context and @type keywords
:return: a JSON-LD of the myflowcyt JSON
"""
instances, errors = self.make_validation()
context_mapping = json.loads(requests.get(self.mapping_url).text)["contexts"]
self.main_context_url = context_mapping[self.base_schema]
for instance_name in instances:
instance = instances[instance_name]
for field in instance:
prop = field + "_schema.json"
if prop in context_mapping.keys():
if type(instance[field]) == list:
for item in instance[field]:
item["@context"] = context_mapping[prop]
item["@type"] = field.capitalize()
elif type(instance[field]) == dict:
instance[field]["@context"] = context_mapping[prop]
instance[field]["@type"] = field.capitalize()
instance["@context"] = self.main_context_url
instance["@type"] = "Experiment"
return instances
def preprocess_content(self, content):
"""
Preprocess the XML into a JSON that is compliant with the schema.
:param content: str containing the XML
:type content: str
:return: a JSON schema cleaned from residual artifacts
"""
mapping = self.get_mapping(self.bender_mapping_file)
experience_metadata = xmljson.parker.data((elemTree.fromstring(
content)))["public-experiments"]["experiment"]
extracted_json = jsonbender.bend(mapping, experience_metadata)
if extracted_json['organization'] == "\n ":
extracted_json['organization'] = []
if extracted_json['keywords'] == "\n ":
extracted_json['keywords'] = []
if 'keywords' in extracted_json.keys() and \
type(extracted_json['keywords']) == OrderedDict and \
'keyword' in extracted_json['keywords'].keys():
if type(extracted_json['keywords']['keyword']) != list:
extracted_json['keywords'] = [extracted_json['keywords']['keyword']]
else:
extracted_json['keywords'] = extracted_json['keywords']['keyword']
if 'organization' in extracted_json.keys() and \
type(extracted_json['organization']) == OrderedDict and \
'organization' in extracted_json['organization'].keys():
if type(extracted_json['organization']['organization']) != list:
extracted_json['organization'] = [extracted_json['organization'][
'organization']]
else:
extracted_json['organization'] = extracted_json['organization'][
'organization']
if 'other' not in extracted_json.keys() \
or extracted_json['other'] is None:
extracted_json['other'] = {}
if 'related-publications' in extracted_json.keys() and \
type(extracted_json['related-publications']) == OrderedDict and \
'publication' in extracted_json['related-publications'].keys():
if type(extracted_json['related-publications']['publication']) != list:
extracted_json['other']['related-publications'] = [
extracted_json['related-publications']['publication']]
else:
extracted_json['other']['related-publications'] = \
extracted_json['related-publications']['publication']
if 'related-publications' in extracted_json.keys():
extracted_json.pop('related-publications')
for field in extracted_json.keys():
if extracted_json[field] is None:
extracted_json[field] = ""
if field == "primaryContact":
extracted_json["primary_contact"] = extracted_json["primaryContact"]
del extracted_json["primaryContact"]
return extracted_json
def make_validation(self):
""" Method to run the mapping for the given number of items
:return: a dictionary containing the list of errors for all processed items
"""
valid = {}
invalid = {}
user_accessible_ids = self.get_user_content_id()
# print(user_accessible_ids)
if isinstance(user_accessible_ids, Exception):
return Exception("Error with client ID " + self.clientID)
else:
schema = json.loads(requests.get(self.schema_url).text)
resolver = RefResolver(self.schema_url, schema, {})
validator = Draft4Validator(schema, resolver=resolver)
content = self.get_all_experiments(self.item_number, user_accessible_ids)
for raw_experiment in content:
experiment = self.preprocess_content(content[raw_experiment])
try:
validation = validator.validate(experiment)
if validation is None:
valid[raw_experiment] = experiment
else:
invalid[raw_experiment] = validation
except Exception as e:
invalid[raw_experiment] = "Unexpected error: " + str(e)
return valid, invalid
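
# --- Added usage sketch (not part of the original module) ---
# How the client above might be driven end to end. The mapping file name and
# client id below are placeholder assumptions, not real values.
if __name__ == '__main__':
    client = FlowRepoClient('schema_context_mapping.json', 'YOUR_FLOWREPOSITORY_CLIENT_ID', 2)
    valid_instances, validation_errors = client.make_validation()
    print('valid experiments:', list(valid_instances.keys()))
    print('validation errors:', validation_errors)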
|
[
"json.dump",
"json.load",
"xml.etree.ElementTree.fromstring",
"jsonbender.bend",
"os.path.dirname",
"jsonbender.S",
"jsonschema.validators.Draft4Validator",
"jsonbender.K",
"jsonbender.OptionalS",
"jsonschema.validators.RefResolver",
"jsonbender.F",
"requests.get",
"requests.request"
] |
[((1818, 1851), 'requests.request', 'requests.request', (['"""GET"""', 'full_url'], {}), "('GET', full_url)\n", (1834, 1851), False, 'import requests\n'), ((2816, 2849), 'requests.request', 'requests.request', (['"""GET"""', 'full_url'], {}), "('GET', full_url)\n", (2832, 2849), False, 'import requests\n'), ((8764, 8809), 'jsonbender.bend', 'jsonbender.bend', (['mapping', 'experience_metadata'], {}), '(mapping, experience_metadata)\n', (8779, 8809), False, 'import jsonbender\n'), ((11804, 11844), 'jsonschema.validators.RefResolver', 'RefResolver', (['self.schema_url', 'schema', '{}'], {}), '(self.schema_url, schema, {})\n', (11815, 11844), False, 'from jsonschema.validators import RefResolver, Draft4Validator\n'), ((11869, 11911), 'jsonschema.validators.Draft4Validator', 'Draft4Validator', (['schema'], {'resolver': 'resolver'}), '(schema, resolver=resolver)\n', (11884, 11911), False, 'from jsonschema.validators import RefResolver, Draft4Validator\n'), ((2031, 2065), 'xml.etree.ElementTree.fromstring', 'elemTree.fromstring', (['response.text'], {}), '(response.text)\n', (2050, 2065), True, 'import xml.etree.ElementTree as elemTree\n'), ((4239, 4264), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4254, 4264), False, 'import os\n'), ((4420, 4453), 'json.dump', 'dump', (['instance', 'outfile'], {'indent': '(4)'}), '(instance, outfile, indent=4)\n', (4424, 4453), False, 'from json import dump, load\n'), ((5294, 5311), 'json.load', 'load', (['mapping_var'], {}), '(mapping_var)\n', (5298, 5311), False, 'from json import dump, load\n'), ((4552, 4577), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4567, 4577), False, 'import os\n'), ((4666, 4691), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4681, 4691), False, 'import os\n'), ((5607, 5653), 'jsonbender.OptionalS', 'jsonbender.OptionalS', (['raw_mapping[mapped_item]'], {}), '(raw_mapping[mapped_item])\n', (5627, 5653), False, 'import jsonbender\n'), ((7361, 7391), 'requests.get', 'requests.get', (['self.mapping_url'], {}), '(self.mapping_url)\n', (7373, 7391), False, 'import requests\n'), ((8659, 8687), 'xml.etree.ElementTree.fromstring', 'elemTree.fromstring', (['content'], {}), '(content)\n', (8678, 8687), True, 'import xml.etree.ElementTree as elemTree\n'), ((11745, 11774), 'requests.get', 'requests.get', (['self.schema_url'], {}), '(self.schema_url)\n', (11757, 11774), False, 'import requests\n'), ((6347, 6402), 'jsonbender.OptionalS', 'jsonbender.OptionalS', (["raw_mapping[mapped_item]['value']"], {}), "(raw_mapping[mapped_item]['value'])\n", (6367, 6402), False, 'import jsonbender\n'), ((6542, 6589), 'jsonbender.S', 'jsonbender.S', (["raw_mapping[mapped_item]['value']"], {}), "(raw_mapping[mapped_item]['value'])\n", (6554, 6589), False, 'import jsonbender\n'), ((6724, 6771), 'jsonbender.K', 'jsonbender.K', (["raw_mapping[mapped_item]['value']"], {}), "(raw_mapping[mapped_item]['value'])\n", (6736, 6771), False, 'import jsonbender\n'), ((6906, 6953), 'jsonbender.F', 'jsonbender.F', (["raw_mapping[mapped_item]['value']"], {}), "(raw_mapping[mapped_item]['value'])\n", (6918, 6953), False, 'import jsonbender\n')]
|
# -*- coding: utf-8 -*-
import os
import sys
import locale
import platform
import subprocess
import darkdetect
try:
import winreg
except Exception:
pass
from pathlib import Path
from PySide2.QtCore import qVersion
DEFAULT_ENCODING = 'utf-8'
SYSTEM_LANGUAGE = locale.getdefaultlocale()[0]
PYTHON_VERSION = '.'.join(str(v) for v in sys.version_info[:3])
QT_VERSION = qVersion()
def is_windows():
return platform.system() == 'Windows'
def is_macos():
return platform.system() == 'Darwin'
def is_linux():
return platform.system() == 'Linux'
def open_file(file_path, timeout=3):
if is_windows():
os.startfile(file_path)
elif is_macos():
subprocess.call(['open', file_path], timeout=timeout)
elif is_linux():
subprocess.call(['xdg-open', file_path], timeout=timeout)
else:
return
def open_directory(directory, timeout=3):
if is_windows():
subprocess.call(['explorer', str(Path(directory))], timeout=timeout)
elif is_macos():
subprocess.call(['open', directory], timeout=timeout)
elif is_linux():
subprocess.call(['xdg-open', directory], timeout=timeout)
else:
return
def is_system_dark_theme():
if is_macos():
return darkdetect.isDark()
if is_windows():
k = winreg.OpenKey(
winreg.HKEY_CURRENT_USER, 'Software\\Microsoft\\Windows\\CurrentVersion\\Themes\\Personalize')
try:
return winreg.QueryValueEx(k, 'AppsUseLightTheme')[0] == 0
except Exception:
pass
return False
def is_text_file(path: str) -> bool:
try:
with open(path, 'rt') as f:
f.readlines()
    except Exception:
return False
return True
__all__ = [
'DEFAULT_ENCODING',
'SYSTEM_LANGUAGE',
'PYTHON_VERSION',
'QT_VERSION',
'is_windows',
'is_macos',
'is_linux',
'open_file',
'open_directory',
'is_system_dark_theme',
'is_text_file'
]
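
# --- Added usage sketch (not part of the original module) ---
# A quick illustration of the helpers above; only prints information.
if __name__ == '__main__':
    print('python:', PYTHON_VERSION, 'qt:', QT_VERSION, 'locale:', SYSTEM_LANGUAGE)
    print('dark theme:', is_system_dark_theme())
    print('this file is text:', is_text_file(__file__))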
|
[
"PySide2.QtCore.qVersion",
"locale.getdefaultlocale",
"winreg.OpenKey",
"darkdetect.isDark",
"winreg.QueryValueEx",
"pathlib.Path",
"subprocess.call",
"platform.system",
"os.startfile"
] |
[((380, 390), 'PySide2.QtCore.qVersion', 'qVersion', ([], {}), '()\n', (388, 390), False, 'from PySide2.QtCore import qVersion\n'), ((273, 298), 'locale.getdefaultlocale', 'locale.getdefaultlocale', ([], {}), '()\n', (296, 298), False, 'import locale\n'), ((422, 439), 'platform.system', 'platform.system', ([], {}), '()\n', (437, 439), False, 'import platform\n'), ((482, 499), 'platform.system', 'platform.system', ([], {}), '()\n', (497, 499), False, 'import platform\n'), ((541, 558), 'platform.system', 'platform.system', ([], {}), '()\n', (556, 558), False, 'import platform\n'), ((638, 661), 'os.startfile', 'os.startfile', (['file_path'], {}), '(file_path)\n', (650, 661), False, 'import os\n'), ((1258, 1277), 'darkdetect.isDark', 'darkdetect.isDark', ([], {}), '()\n', (1275, 1277), False, 'import darkdetect\n'), ((1312, 1425), 'winreg.OpenKey', 'winreg.OpenKey', (['winreg.HKEY_CURRENT_USER', '"""Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Themes\\\\Personalize"""'], {}), "(winreg.HKEY_CURRENT_USER,\n 'Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Themes\\\\Personalize')\n", (1326, 1425), False, 'import winreg\n'), ((691, 744), 'subprocess.call', 'subprocess.call', (["['open', file_path]"], {'timeout': 'timeout'}), "(['open', file_path], timeout=timeout)\n", (706, 744), False, 'import subprocess\n'), ((1028, 1081), 'subprocess.call', 'subprocess.call', (["['open', directory]"], {'timeout': 'timeout'}), "(['open', directory], timeout=timeout)\n", (1043, 1081), False, 'import subprocess\n'), ((774, 831), 'subprocess.call', 'subprocess.call', (["['xdg-open', file_path]"], {'timeout': 'timeout'}), "(['xdg-open', file_path], timeout=timeout)\n", (789, 831), False, 'import subprocess\n'), ((1111, 1168), 'subprocess.call', 'subprocess.call', (["['xdg-open', directory]"], {'timeout': 'timeout'}), "(['xdg-open', directory], timeout=timeout)\n", (1126, 1168), False, 'import subprocess\n'), ((963, 978), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (967, 978), False, 'from pathlib import Path\n'), ((1467, 1510), 'winreg.QueryValueEx', 'winreg.QueryValueEx', (['k', '"""AppsUseLightTheme"""'], {}), "(k, 'AppsUseLightTheme')\n", (1486, 1510), False, 'import winreg\n')]
|
"""
Functions for interacting with the BEAST model
"""
import numpy as np
import h5py
from tqdm import tqdm
__all__ = ["read_lnp_data", "read_noise_data", "read_sed_data", "get_lnp_grid_vals"]
def read_lnp_data(filename, nstars=None, shift_lnp=True):
"""
Read in the sparse lnp for all the stars in the hdf5 file
Parameters
----------
filename : string
name of the file with the sparse lnp values
nstars : int (default=None)
if you want to check that the number of lnp values is correct, set this
to the number of stars expected in the file
shift_lnp : boolean (default=True)
if True, shift lnp values to have a max of 0.0
Returns
-------
    lnp_data : dictionary
contains arrays of the lnp values and indices to the BEAST model grid
"""
with h5py.File(filename, "r") as lnp_hdf:
# get keyword names for the stars (as opposed to filter info)
star_key_list = [sname for sname in lnp_hdf.keys() if "star" in sname]
tot_stars = len(star_key_list)
if nstars is not None:
if tot_stars != nstars:
raise ValueError(
"Error: number of stars not equal between nstars image and lnp file"
)
# initialize arrays
# - find the lengths of the sparse likelihoods
lnp_sizes = [lnp_hdf[sname]["lnp"][()].shape[0] for sname in star_key_list]
# - set arrays to the maximum size
lnp_vals = np.full((np.max(lnp_sizes), tot_stars), -np.inf)
lnp_indxs = np.full((np.max(lnp_sizes), tot_stars), np.nan)
# loop over all the stars (groups)
for k, sname in enumerate(star_key_list):
lnp_vals[: lnp_sizes[k], k] = lnp_hdf[sname]["lnp"][()]
lnp_indxs[: lnp_sizes[k], k] = np.array(lnp_hdf[sname]["idx"][()])
if shift_lnp:
# shift the log(likelihood) values to have a max of 0.0
# ok if the same shift is applied to all stars in a pixel
        # avoids numerical issues later when we go to integrate probs
lnp_vals -= np.max(lnp_vals)
return {"vals": lnp_vals, "indxs": lnp_indxs}
def read_noise_data(
filename,
param_list=["bias", "completeness", "error"],
filter_col=None,
):
"""
Read some or all of the noise model parameters, for one or all of the filters
Parameters
----------
filename : string
        name of the file with the BEAST observation model grid
param_list : list of strings
the set of parameters to extract
filter_col : int (default=None)
if set, only return the data for this column number
Returns
-------
    noise_data : dictionary
contains arrays of the noise parameters
"""
noise_data = {}
# open files for reading
with h5py.File(filename, "r") as noise_hdf:
# get beast physicsmodel params
for param in tqdm(param_list, desc="reading noise data"):
if filter_col is None:
noise_data[param] = np.array(noise_hdf[param])
else:
noise_data[param] = noise_hdf[param][:, filter_col]
return noise_data
def read_sed_data(
filename,
param_list=["Av", "Rv", "f_A", "M_ini", "logA", "Z", "distance"],
return_params=False,
):
"""
Read in the beast data needed by all the pixels
Parameters
----------
filename : string
name of the file with the BEAST physicsmodel grid
param_list : list of strings
The set of parameters to extract (default: Av, Rv, f_A, M_ini, logA, Z,
distance). If set to 'all', extract all parameters and model fluxes in
the grid.
return_params : boolean (default=False)
If True, return the list of keywords for all parameters and model fluxes
in the grid. Useful for checking what columns are present.
Returns
-------
Two possible returns depending on param_list input
    sed_data : dictionary (param_list input as list of strings)
contains arrays of the requested SED grid parameters
if return_params is True, then also provides
grid_param_list : list of strings (return_params is True)
if param_list is None, return the list of parameter options
"""
sed_data = {}
# open files for reading
with h5py.File(filename, "r") as sed_hdf:
# get the possible list of parameters
grid_param_list = list(sed_hdf["grid"][()].dtype.names)
# return that if the user is so inclined
if return_params:
return grid_param_list + ["seds", "lamb", "filters"]
if param_list == "all":
param_list = grid_param_list
# get parameters
for param in tqdm(param_list, desc="reading sed data"):
# grid parameter
if param in grid_param_list:
sed_data[param] = sed_hdf["grid"][param]
# wavelengths of the filters -or- SED photometry values
elif (param == "lamb") or (param == "seds"):
sed_data[param] = sed_hdf[param][()]
elif param == "filters":
filtstr = sed_hdf["grid"].attrs["filters"]
if isinstance(filtstr, bytes):
filtstr = filtstr.decode()
sed_data[param] = filtstr.split(" ")
else:
raise ValueError("parameter {0} not found in SED grid".format(param))
return sed_data
def get_lnp_grid_vals(sed_data, lnp_data, verbose=False):
"""
Acquire the SED parameter values for the locations where the lnp values
were saved
Parameters
----------
    sed_data : dictionary or string
if dictionary: contains arrays of the beast parameters (output from
read_sed_data)
if string: name of the file with the BEAST physicsmodel grid, which will
be used in read_sed_data to get default parameters
    lnp_data : dictionary or string
if dictionary: contains arrays of the lnp values and indices to the BEAST
model grid (output from read_lnp_data)
if string: name of the file with the sparse lnp values, which will be
used in read_lnp_data with default parameters
Returns
-------
    lnp_grid_vals : dictionary
arrays of the SED grid parameters for the points in the lnp lists
"""
if isinstance(sed_data, str):
sed_data = read_sed_data(sed_data)
if isinstance(lnp_data, str):
lnp_data = read_lnp_data(lnp_data)
# get the keys in beast_data
param_list = sed_data.keys()
# setup the output
lnp_grid_vals = {}
n_lnps, n_stars = lnp_data["indxs"].shape
for param in param_list:
lnp_grid_vals[param] = np.zeros((n_lnps, n_stars), dtype=float)
# loop over the stars and extract the requested BEAST data
for k in tqdm(
range(n_stars), desc="extracting params for each lnP", disable=not verbose
):
lnp_inds = lnp_data["indxs"][:, k]
good_inds = np.isfinite(lnp_inds)
for param in param_list:
lnp_grid_vals[param][good_inds, k] = sed_data[param][
lnp_inds[good_inds].astype(int)
]
return lnp_grid_vals
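# --- Example usage (added sketch, not part of the original BEAST tools) ---
# get_lnp_grid_vals accepts either the already-read dictionaries or the file
# names themselves; the file paths below are hypothetical.
def _example_lnp_grid_vals(sed_file="beast_seds.grid.hd5", lnp_file="beast_lnp.hd5"):
    """Illustrative only: map the saved lnP points back onto SED grid values."""
    return get_lnp_grid_vals(sed_file, lnp_file, verbose=True)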
|
[
"h5py.File",
"tqdm.tqdm",
"numpy.zeros",
"numpy.isfinite",
"numpy.max",
"numpy.array"
] |
[((833, 857), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (842, 857), False, 'import h5py\n'), ((2838, 2862), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (2847, 2862), False, 'import h5py\n'), ((2939, 2982), 'tqdm.tqdm', 'tqdm', (['param_list'], {'desc': '"""reading noise data"""'}), "(param_list, desc='reading noise data')\n", (2943, 2982), False, 'from tqdm import tqdm\n'), ((4355, 4379), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (4364, 4379), False, 'import h5py\n'), ((4763, 4804), 'tqdm.tqdm', 'tqdm', (['param_list'], {'desc': '"""reading sed data"""'}), "(param_list, desc='reading sed data')\n", (4767, 4804), False, 'from tqdm import tqdm\n'), ((6744, 6784), 'numpy.zeros', 'np.zeros', (['(n_lnps, n_stars)'], {'dtype': 'float'}), '((n_lnps, n_stars), dtype=float)\n', (6752, 6784), True, 'import numpy as np\n'), ((7021, 7042), 'numpy.isfinite', 'np.isfinite', (['lnp_inds'], {}), '(lnp_inds)\n', (7032, 7042), True, 'import numpy as np\n'), ((1820, 1855), 'numpy.array', 'np.array', (["lnp_hdf[sname]['idx'][()]"], {}), "(lnp_hdf[sname]['idx'][()])\n", (1828, 1855), True, 'import numpy as np\n'), ((2118, 2134), 'numpy.max', 'np.max', (['lnp_vals'], {}), '(lnp_vals)\n', (2124, 2134), True, 'import numpy as np\n'), ((1507, 1524), 'numpy.max', 'np.max', (['lnp_sizes'], {}), '(lnp_sizes)\n', (1513, 1524), True, 'import numpy as np\n'), ((1576, 1593), 'numpy.max', 'np.max', (['lnp_sizes'], {}), '(lnp_sizes)\n', (1582, 1593), True, 'import numpy as np\n'), ((3055, 3081), 'numpy.array', 'np.array', (['noise_hdf[param]'], {}), '(noise_hdf[param])\n', (3063, 3081), True, 'import numpy as np\n')]
|
import math
import numpy as np
from collections import namedtuple
import random
from cyclopts import cyclopts_io as cycio
from cyclopts.structured_species import data
"""default values and np.dtypes for points making up parameter space"""
Param = namedtuple('Param', ['val', 'dtype'])
class Point(object):
"""A container class representing a point in parameter space"""
def __init__(self, d=None):
"""Parameters
----------
d : dict, optional
a dictionary with key value pairs of parameter name, parameter
value
"""
d = d if d is not None else {}
# init with dict-specified value else default
for name, param in self._parameters().items():
val = d[name] if name in d else param.val
setattr(self, name, val)
def _parameters(self):
"""subclasses must implement their parameter mapping"""
return NotImplemented
def __eq__(self, other):
return (isinstance(other, self.__class__) \
and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def mean_enr(rxtr, commod):
"""the mean enrichment for a reactor and commodity"""
return np.mean(data.enr_ranges[rxtr][commod])
def conv_ratio(kind):
"""provides the inventory to process conversion ratio for given support"""
commod, rxtr = data.sup_to_commod[kind], data.sup_to_rxtr[kind]
enr = mean_enr(rxtr, commod)
return data.converters[kind]['inv'](1.0, enr, commod) / \
data.converters[kind]['proc'](1.0, enr, commod)
def region(loc, n_reg=1):
"""assumes loc is on [0, 1]"""
return int(math.floor(n_reg * loc))
def loc_pref(r_loc, s_loc, loc_fidelity=0, n_reg=1):
"""returns the location-based preference between a requester and supplier
for a commodity"""
loc_pref = 0
if loc_fidelity > 0: # at least coarse
rreg = region(r_loc, n_reg=n_reg)
sreg = region(s_loc, n_reg=n_reg)
loc_pref = math.exp(-np.abs(rreg - sreg))
if loc_fidelity > 1: # fine
loc_pref = (loc_pref + math.exp(-np.abs(r_loc - s_loc))) / 2
return loc_pref
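# --- Example (added sketch, not part of the original module) ---
# Arbitrary numbers showing how region() and loc_pref() combine a requester
# and a supplier location on [0, 1].
def _example_loc_pref():
    r_loc, s_loc = 0.2, 0.8
    coarse = loc_pref(r_loc, s_loc, loc_fidelity=1, n_reg=4)  # region-level only
    fine = loc_pref(r_loc, s_loc, loc_fidelity=2, n_reg=4)    # region plus exact distance
    return coarse, fine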
def reactor_breakdown(point):
"""Returns
-------
n_uox, n_mox, n_thox : tuple
the number of each reactor type
"""
n_rxtr = point.n_rxtr
fidelity = point.f_fc
r_t_f = point.r_t_f # thermal to fast
r_th_pu = point.r_th_pu # thox to mox
n_uox, n_mox, n_thox = 0, 0, 0
if fidelity == 0: # once through
n_uox = max(n_rxtr, 1)
elif fidelity == 1: # uox + fast mox
n_uox = max(int(round(r_t_f * n_rxtr)), 1)
n_mox = max(n_rxtr - n_uox, 1)
else: # uox + fast mox + fast thox
n_uox = max(int(round(r_t_f * n_rxtr)), 1)
n_thox = max(int(round(r_th_pu * (n_rxtr - n_uox))), 1)
n_mox = max(n_rxtr - n_uox - n_thox, 1)
return n_uox, n_mox, n_thox
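# --- Example (added sketch, not part of the original module) ---
# reactor_breakdown() only touches a handful of Point attributes, so a tiny
# stand-in object is enough to illustrate it; the values are arbitrary.
def _example_reactor_breakdown():
    class _FakePoint(object):
        n_rxtr = 10
        f_fc = 2       # uox + fast mox + fast thox
        r_t_f = 0.6    # thermal-to-fast ratio
        r_th_pu = 0.5  # thox-to-mox ratio
    return reactor_breakdown(_FakePoint())  # -> (6, 2, 2)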
def support_breakdown(point):
"""Returns
-------
n_uox, n_mox, n_thox, n_repo : tuple
the number of each support type
"""
n_uox_r, n_mox_r, n_thox_r = reactor_breakdown(point)
n_uox, n_t_mox, n_f_mox, n_f_thox, n_repo = 0, 0, 0, 0, 0
fidelity = point.f_fc
# number thermal supports
if fidelity == 0: # once through - only uox
n_uox = max(int(round(point.r_s_th * n_uox_r)), 1)
else:
n_s_t = max(int(round(point.r_s_th * n_uox_r)), 1)
n_uox = max(int(round(n_s_t / (1.0 + point.r_s_mox_uox))), 1)
n_t_mox = max(n_s_t - n_uox, 1)
# number f_mox supports
if fidelity > 0:
n_f_mox = max(int(round(point.r_s_mox * n_mox_r)), 1)
# number f_thox supports
if fidelity > 1:
n_f_thox = max(int(round(point.r_s_thox * n_thox_r)), 1)
if hasattr(point, 'r_repo'):
n_repo = max(int(round(sum([n_uox, n_t_mox, n_f_mox, n_f_thox]) * \
point.r_repo)), 1)
return n_uox, n_t_mox, n_f_mox, n_f_thox, n_repo
def assembly_roulette(fracs):
"""In the case where this is only one assembly (i.e., low reactor fidelity),
this method chooses the index
Parameters
----------
fracs : list
the assembly distribution, assumed to be normalized
Returns
-------
idx : int
the chosen list index
"""
rnd = random.uniform(0, 1)
cum_sum = 0
for i in range(len(fracs)):
cum_sum += fracs[i]
if rnd <= cum_sum:
return i
def assembly_breakdown(point, kind):
"""Returns
-------
assems : dict
a dictionary from commodity types to the number of assemblies
"""
if kind == data.Reactors.th:
fracs = point.d_th
elif kind == data.Reactors.f_mox:
fracs = point.d_f_mox
elif kind == data.Reactors.f_thox:
fracs = point.d_f_thox
denom = float(sum(fracs))
fracs = [x / denom for x in fracs]
if point.f_rxtr == 0: # one 'assembly', i.e. a batch
ret = [0] * len(fracs)
ret[assembly_roulette(fracs)] = 1
else: # full assemblies
nassems = data.n_assemblies[kind]
ret = [int(round(x * nassems)) for x in fracs]
diff = sum(ret) - nassems
if diff != 0: # adjust largest amount to give exactly nassems
ret[ret.index(max(ret))] -= diff
return {data.Commodities[i]: ret[i] for i in range(len(ret))}
class Reactor(object):
"""A simplified reactor model for Structured Species"""
def __init__(self, kind, point=None, n_assems=None):
self.kind = kind
if point is not None:
self.n_assems = 1 if point.f_rxtr == 0 else data.n_assemblies[kind]
elif n_assems is not None:
self.n_assems = n_assems
self.enr_rnd = random.uniform(0, 1)
self.loc = data.loc()
def enr(self, commod):
# node quantity takes into account relative fissile material
lb, ub = data.enr_ranges[self.kind][commod]
return (ub - lb) * self.enr_rnd + lb
def coeffs(self, commod):
return [1 / data.relative_qtys[self.kind][commod]]
"""Structured Arc Table Members"""
arc_io_name = "Arcs"
arc_tbl_dtype = np.dtype(
[('arcid', np.uint32), ('commod', np.uint32),
('pref_c', np.float32), ('pref_l', np.float32)])
"""Structured Post-Processing Table Members"""
pp_tbl_name = "PostProcess"
pp_tbl_dtype = np.dtype(
[('solnid', ('str', 16)), ('c_pref_flow', np.float64),
('l_pref_flow', np.float64)])
def tbl_descs(io_prefix):
return [
cycio.TblDesc('/'.join([io_prefix, pp_tbl_name]), 'soln', 'solnid'),
]
def _iid_to_prefs(iid, tbl, narcs, strategy='col'):
"""return a numpy array of preferences"""
if strategy == 'grp':
return tbl.read(field='pref_c'), tbl.read(field='pref_l')
# otherwise, do column strat
c_ret = np.zeros(narcs)
l_ret = np.zeros(narcs)
rows = cycio.uuid_rows(tbl, iid)
for x in rows:
aid = x['arcid']
c_ret[aid] = x['pref_c']
l_ret[aid] = x['pref_l']
return c_ret, l_ret
def _pp_work(instid, solnids, narcs, sid_to_flows, arc_tbl, strategy='col'):
c_prefs, l_prefs = _iid_to_prefs(instid, arc_tbl, narcs, strategy=strategy)
data = []
for sid, flows in sid_to_flows.items():
c_pref_flow = np.dot(c_prefs, flows)
l_pref_flow = np.dot(l_prefs, flows)
data.append((sid.bytes, c_pref_flow, l_pref_flow))
return data
def post_process(instid, solnids, props, io_managers, sp_name):
"""Perform any post processing on input and output.
Parameters
----------
instid : UUID
UUID of the instance to post process
solnids : tuple of UUIDs
a collection of solution UUIDs corresponding the instid
props : tuple, other
as defined by cyclopts.exchange_family
io_managers : tuple of cyclopts.cyclopts_io.IOManager
iomanager from an input file, iomanager from an output file,
and iomanager from a post-processed file
sp_name : str
the name of the species being post processed
"""
intbls, outtbls, pptbls = (m.tables for m in io_managers)
ingrps, outgrps, ppgrps = (m.groups for m in io_managers)
narcs, sid_to_flows = props
pp_tbl = pptbls[pp_tbl_name]
if arc_io_name in intbls.keys():
arc_tbl = intbls[arc_io_name]
strategy = 'col'
else:
arc_tbl = ingrps[arc_io_name].group()._f_get_child('id_' +
instid.hex)
strategy = 'grp'
data = _pp_work(instid, solnids, narcs, sid_to_flows, arc_tbl,
strategy=strategy)
pp_tbl.append_data(data)
|
[
"numpy.abs",
"random.uniform",
"numpy.dtype",
"numpy.zeros",
"cyclopts.cyclopts_io.uuid_rows",
"math.floor",
"cyclopts.structured_species.data.append",
"numpy.mean",
"collections.namedtuple",
"numpy.dot",
"cyclopts.structured_species.data.loc"
] |
[((249, 286), 'collections.namedtuple', 'namedtuple', (['"""Param"""', "['val', 'dtype']"], {}), "('Param', ['val', 'dtype'])\n", (259, 286), False, 'from collections import namedtuple\n'), ((6197, 6305), 'numpy.dtype', 'np.dtype', (["[('arcid', np.uint32), ('commod', np.uint32), ('pref_c', np.float32), (\n 'pref_l', np.float32)]"], {}), "([('arcid', np.uint32), ('commod', np.uint32), ('pref_c', np.\n float32), ('pref_l', np.float32)])\n", (6205, 6305), True, 'import numpy as np\n'), ((6402, 6500), 'numpy.dtype', 'np.dtype', (["[('solnid', ('str', 16)), ('c_pref_flow', np.float64), ('l_pref_flow', np.\n float64)]"], {}), "([('solnid', ('str', 16)), ('c_pref_flow', np.float64), (\n 'l_pref_flow', np.float64)])\n", (6410, 6500), True, 'import numpy as np\n'), ((1262, 1300), 'numpy.mean', 'np.mean', (['data.enr_ranges[rxtr][commod]'], {}), '(data.enr_ranges[rxtr][commod])\n', (1269, 1300), True, 'import numpy as np\n'), ((4358, 4378), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (4372, 4378), False, 'import random\n'), ((6871, 6886), 'numpy.zeros', 'np.zeros', (['narcs'], {}), '(narcs)\n', (6879, 6886), True, 'import numpy as np\n'), ((6899, 6914), 'numpy.zeros', 'np.zeros', (['narcs'], {}), '(narcs)\n', (6907, 6914), True, 'import numpy as np\n'), ((6926, 6951), 'cyclopts.cyclopts_io.uuid_rows', 'cycio.uuid_rows', (['tbl', 'iid'], {}), '(tbl, iid)\n', (6941, 6951), True, 'from cyclopts import cyclopts_io as cycio\n'), ((1699, 1722), 'math.floor', 'math.floor', (['(n_reg * loc)'], {}), '(n_reg * loc)\n', (1709, 1722), False, 'import math\n'), ((5788, 5808), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (5802, 5808), False, 'import random\n'), ((5829, 5839), 'cyclopts.structured_species.data.loc', 'data.loc', ([], {}), '()\n', (5837, 5839), False, 'from cyclopts.structured_species import data\n'), ((7324, 7346), 'numpy.dot', 'np.dot', (['c_prefs', 'flows'], {}), '(c_prefs, flows)\n', (7330, 7346), True, 'import numpy as np\n'), ((7369, 7391), 'numpy.dot', 'np.dot', (['l_prefs', 'flows'], {}), '(l_prefs, flows)\n', (7375, 7391), True, 'import numpy as np\n'), ((7400, 7450), 'cyclopts.structured_species.data.append', 'data.append', (['(sid.bytes, c_pref_flow, l_pref_flow)'], {}), '((sid.bytes, c_pref_flow, l_pref_flow))\n', (7411, 7450), False, 'from cyclopts.structured_species import data\n'), ((2053, 2072), 'numpy.abs', 'np.abs', (['(rreg - sreg)'], {}), '(rreg - sreg)\n', (2059, 2072), True, 'import numpy as np\n'), ((2152, 2173), 'numpy.abs', 'np.abs', (['(r_loc - s_loc)'], {}), '(r_loc - s_loc)\n', (2158, 2173), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import uuid
import html
import typing as t
import json
from pathlib import Path
import IPython.display
from ipykernel.comm import Comm
from . import experiment as exp
from .render import escapejs, make_experiment_standalone_page
class GetSelectedFailure(Exception):
pass
class NotebookJSBundleInjector:
"""
TODO: Maybe we should do something smart here. Like inject only once?
But how to be robust to the user clearing the output of the cell where we injected the bundle?
If he then refreshes the page, HiPlot bundle is no longer injected...
"""
@classmethod
def ensure_injected(cls) -> None:
bundle = Path(__file__).parent / "static" / "built" / "hiplot.bundle.js"
IPython.display.display(IPython.display.Javascript(bundle.read_text("utf-8")))
def jupyter_make_full_width(content: str) -> str:
w_id = f"wrap_html_{uuid.uuid4().hex[:6]}"
return f"""
<div id="{w_id}">{content}</div>
<script type="text/javascript">
(function() {{
const elem = document.getElementById({json.dumps(w_id)});
elem.style.width = "100vw";
const removeElems = elem.parentElement.parentElement.getElementsByClassName("prompt");
for (var i = 0; i < removeElems.length; ++i) {{
removeElems[i].remove();
}}
elem.parentElement.style.overflowX = "visible";
const scale_to_100pct_screen = function() {{
elem.style.marginLeft = '0px';
elem.style.marginLeft = (- elem.getBoundingClientRect().x) + 'px';
}};
window.addEventListener('resize', function() {{
scale_to_100pct_screen();
}});
scale_to_100pct_screen();
}})();
</script>
"""
class IPythonExperimentDisplayed(exp.ExperimentDisplayed):
def __init__(self, xp: exp.Experiment, comm_name: str) -> None:
self._exp = xp
self._num_recv = 0
self._selected_ids: t.List[str] = []
self._last_data_per_type: t.Dict[str, t.Any] = {}
def target_func(comm: Comm, open_msg: t.Dict[str, t.Any]) -> None: # pylint: disable=unused-argument
# comm is the kernel Comm instance
# msg is the comm_open message
# Register handler for later messages
@comm.on_msg # type: ignore
def _recv(msg: t.Dict[str, t.Any]) -> None:
self._num_recv += 1
msg_data = msg["content"]["data"]
                self._last_data_per_type[msg_data["type"]] = msg_data["data"]
        try:
            ip: t.Any = get_ipython()  # type: ignore # pylint: disable=undefined-variable
ip.kernel.comm_manager.register_target(comm_name, target_func)
except NameError: # NameError: name 'get_ipython' is not defined
# We are not in an ipython environment - for example in testing
pass
no_data_received_error = GetSelectedFailure(
"""No data received from the front-end. Please make sure that:
1. You don't call "get_selected" on the same cell
2. The interface has loaded
3. You are in a Jupyter notebook (Jupyter lab is *not* supported)"""
)
def get_selected(self) -> t.List[exp.Datapoint]:
last_selection = self._last_data_per_type.get("selection")
if last_selection is None:
raise self.no_data_received_error
selected_set = set(last_selection["selected"])
datapoints = [i for i in self._exp.datapoints if i.uid in selected_set]
assert len(datapoints) == len(selected_set)
return datapoints
def get_brush_extents(self) -> t.Dict[str, t.Dict[str, t.Any]]:
last_msg = self._last_data_per_type.get("brush_extents")
if last_msg is None:
raise self.no_data_received_error
return last_msg # type: ignore
def display_exp(
xp: exp.Experiment,
force_full_width: bool = False,
store_state_url: t.Optional[str] = None
) -> IPythonExperimentDisplayed:
comm_id = f"comm_{uuid.uuid4().hex[:6]}"
displayed_xp = IPythonExperimentDisplayed(xp, comm_id)
options: t.Dict[str, t.Any] = {
'experiment': xp._asdict()
}
if store_state_url is not None:
options.update({"persistent_state_url_prefix": store_state_url})
else:
options.update({"persistent_state": None})
index_html = make_experiment_standalone_page(options=options)
# Remove line that references the script bundle - prevents an HTTP error in the notebook
index_html = index_html.replace('src="static/built/hiplot.bundle.js"', '')
index_html = index_html.replace(
"/*ON_LOAD_SCRIPT_INJECT*/",
f"""/*ON_LOAD_SCRIPT_INJECT*/
const comm_id = {escapejs(comm_id)};
try {{
console.log("Setting up communication channel with Jupyter: ", comm_id);
var comm = Jupyter.notebook.kernel.comm_manager.new_comm(comm_id, {{'type': 'hello'}});
Object.assign(options, {{"comm": comm}});
}}
catch(err) {{
console.warn('Unable to create Javascript <-> Python communication channel' +
' (are you in a Jupyter notebook? Jupyter labs is *not* supported!)');
}}
""")
if force_full_width:
index_html = jupyter_make_full_width(index_html)
NotebookJSBundleInjector.ensure_injected()
IPython.display.display(IPython.display.HTML(index_html))
return displayed_xp
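# --- Example (added sketch, not part of the original hiplot source) ---
# In a notebook cell one would typically build an Experiment and hand it to
# display_exp(); the data below is made up.
def _example_display():
    rows = [
        {"dropout": 0.10, "lr": 0.001, "loss": 4.5},
        {"dropout": 0.15, "lr": 0.010, "loss": 3.5},
    ]
    displayed = display_exp(exp.Experiment.from_iterable(rows), force_full_width=True)
    # later, in a *different* cell, the front-end selection can be read back:
    # selected = displayed.get_selected()
    return displayed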
|
[
"pathlib.Path",
"uuid.uuid4",
"json.dumps"
] |
[((1228, 1244), 'json.dumps', 'json.dumps', (['w_id'], {}), '(w_id)\n', (1238, 1244), False, 'import json\n'), ((1055, 1067), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1065, 1067), False, 'import uuid\n'), ((4134, 4146), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4144, 4146), False, 'import uuid\n'), ((828, 842), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (832, 842), False, 'from pathlib import Path\n')]
|
import falcon
import logging
import json
from sikre.models.models import User
class LoginResource(object):
"""
The login resource handles the login from all the
"""
def __init__(self):
self.logger = logging.getLogger('thingsapp.' + __name__)
def on_get(self, request, response):
raise falcon.HTTPError(falcon.HTTP_405, "Client error",
"The GET method is not allowed in this endpoint.")
    def on_post(self, request, response, **kwargs):
        """
        This method will check that the email and the password match, then
        logs in the user.
        """
        try:
            user = User.get(username=request.get_param('login-email'))
            valid = user.check_password(request.get_param('login-password'))
        except Exception:
            raise falcon.HTTPError(falcon.HTTP_500, "Server error",
                                   "Unable to verify the credentials.")
        if valid:
            response.status = falcon.HTTP_200
            response.body = 'Logged in'
        else:
            raise falcon.HTTPError(falcon.HTTP_401, "Client error",
                                   "Invalid email or password.")
def on_put(self, request, response):
        raise falcon.HTTPError(falcon.HTTP_405, "Client error",
                               "The PUT method is not allowed in this endpoint.")
def on_update(self, request, response):
raise falcon.HTTPError(falcon.HTTP_405, "Client error",
"The UPDATE method is not allowed in this endpoint.")
def on_delete(self, request, response):
raise falcon.HTTPError(falcon.HTTP_405, "Client error",
"The DELETE method is not allowed in this endpoint.")
def on_options(self, request, response):
'''
Handles all OPTIONS requests.
Returns status code 200.
'''
        response.status = falcon.HTTP_200
class LogoutResource(object):
"""
The login resource handles the login from all the
"""
def on_get(self, request, response):
raise falcon.HTTPError(falcon.HTTP_405, "Client error",
"The GET method is not allowed in this endpoint.")
def on_post(self, request, response):
"""
This method will check that the email and the token match, then logs
in the user.
"""
response.status = falcon.HTTP_200
response.body = 'whatever, man'
def on_put(self, request, response):
        raise falcon.HTTPError(falcon.HTTP_405, "Client error",
                               "The PUT method is not allowed in this endpoint.")
def on_update(self, request, response):
raise falcon.HTTPError(falcon.HTTP_405, "Client error",
"The UPDATE method is not allowed in this endpoint.")
def on_delete(self, request, response):
raise falcon.HTTPError(falcon.HTTP_405, "Client error",
"The DELETE method is not allowed in this endpoint.")
class ForgotPasswordResource(object):
"""
The login resource handles the login from all the
"""
def on_get(self, request, response):
raise falcon.HTTPError(falcon.HTTP_405, "Client error",
"The GET method is not allowed in this endpoint.")
def on_post(self, request, response, provider):
"""
This method will check that the email and the token match, then logs
in the user.
"""
response.status = falcon.HTTP_200
response.body = 'whatever, man'
def on_put(self, request, response):
        raise falcon.HTTPError(falcon.HTTP_405, "Client error",
                               "The PUT method is not allowed in this endpoint.")
def on_update(self, request, response):
raise falcon.HTTPError(falcon.HTTP_405, "Client error",
"The UPDATE method is not allowed in this endpoint.")
def on_delete(self, request, response):
raise falcon.HTTPError(falcon.HTTP_405, "Client error",
"The DELETE method is not allowed in this endpoint.")
|
[
"falcon.HTTPError",
"logging.getLogger"
] |
[((227, 269), 'logging.getLogger', 'logging.getLogger', (["('thingsapp.' + __name__)"], {}), "('thingsapp.' + __name__)\n", (244, 269), False, 'import logging\n'), ((326, 430), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_405', '"""Client error"""', '"""The GET method is not allowed in this endpoint."""'], {}), "(falcon.HTTP_405, 'Client error',\n 'The GET method is not allowed in this endpoint.')\n", (342, 430), False, 'import falcon\n'), ((1128, 1232), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_405', '"""Client error"""', '"""The GET method is not allowed in this endpoint."""'], {}), "(falcon.HTTP_405, 'Client error',\n 'The GET method is not allowed in this endpoint.')\n", (1144, 1232), False, 'import falcon\n'), ((1319, 1426), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_405', '"""Client error"""', '"""The UPDATE method is not allowed in this endpoint."""'], {}), "(falcon.HTTP_405, 'Client error',\n 'The UPDATE method is not allowed in this endpoint.')\n", (1335, 1426), False, 'import falcon\n'), ((1513, 1620), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_405', '"""Client error"""', '"""The DELETE method is not allowed in this endpoint."""'], {}), "(falcon.HTTP_405, 'Client error',\n 'The DELETE method is not allowed in this endpoint.')\n", (1529, 1620), False, 'import falcon\n'), ((1985, 2089), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_405', '"""Client error"""', '"""The GET method is not allowed in this endpoint."""'], {}), "(falcon.HTTP_405, 'Client error',\n 'The GET method is not allowed in this endpoint.')\n", (2001, 2089), False, 'import falcon\n'), ((2422, 2526), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_405', '"""Client error"""', '"""The GET method is not allowed in this endpoint."""'], {}), "(falcon.HTTP_405, 'Client error',\n 'The GET method is not allowed in this endpoint.')\n", (2438, 2526), False, 'import falcon\n'), ((2613, 2720), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_405', '"""Client error"""', '"""The UPDATE method is not allowed in this endpoint."""'], {}), "(falcon.HTTP_405, 'Client error',\n 'The UPDATE method is not allowed in this endpoint.')\n", (2629, 2720), False, 'import falcon\n'), ((2807, 2914), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_405', '"""Client error"""', '"""The DELETE method is not allowed in this endpoint."""'], {}), "(falcon.HTTP_405, 'Client error',\n 'The DELETE method is not allowed in this endpoint.')\n", (2823, 2914), False, 'import falcon\n'), ((3107, 3211), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_405', '"""Client error"""', '"""The GET method is not allowed in this endpoint."""'], {}), "(falcon.HTTP_405, 'Client error',\n 'The GET method is not allowed in this endpoint.')\n", (3123, 3211), False, 'import falcon\n'), ((3554, 3658), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_405', '"""Client error"""', '"""The GET method is not allowed in this endpoint."""'], {}), "(falcon.HTTP_405, 'Client error',\n 'The GET method is not allowed in this endpoint.')\n", (3570, 3658), False, 'import falcon\n'), ((3745, 3852), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_405', '"""Client error"""', '"""The UPDATE method is not allowed in this endpoint."""'], {}), "(falcon.HTTP_405, 'Client error',\n 'The UPDATE method is not allowed in this endpoint.')\n", (3761, 3852), False, 'import falcon\n'), ((3939, 4046), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_405', '"""Client error"""', '"""The DELETE method is not 
allowed in this endpoint."""'], {}), "(falcon.HTTP_405, 'Client error',\n 'The DELETE method is not allowed in this endpoint.')\n", (3955, 4046), False, 'import falcon\n'), ((1037, 1070), 'falcon.HTTPError', 'falcon.HTTPError', (['falcon.HTTP_500'], {}), '(falcon.HTTP_500)\n', (1053, 1070), False, 'import falcon\n')]
|
"""
Carto Waze Lambda Connector
Developed by Geographica, 2017-2018.
"""
import os
class Config:
"""
Configuration parameters:
- Carto API.
- WAZE API
- Traffico
"""
# Carto API
CARTO_API_KEY = os.environ.get('CARTO_API_KEY')
CARTO_USER = os.environ.get('CARTO_USER')
# Waze API
_WAZE_API_URL = os.environ.get('WAZE_API_URL')
_WAZE_TKN = os.environ.get('WAZE_TKN')
_WAZE_PARTNER_NAME = os.environ.get('WAZE_PARTNER')
_WAZE_FRMT = 'JSON'
_WAZE_TYPES = 'traffic,alerts,irregularities'
_WAZE_POLY = os.environ.get('WAZE_POLY')
WAZE_GEORSS = (
_WAZE_API_URL,
_WAZE_TKN,
_WAZE_PARTNER_NAME,
_WAZE_FRMT,
_WAZE_TYPES,
_WAZE_POLY
)
# Traffico
TRAFFICO_PREFIX = os.environ.get('TRAFFICO_PREFIX')
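# --- Example (added note, not part of the original connector) ---
# The class body reads os.environ when the module is first imported, so the
# variables must be exported beforehand; the module path and values below are
# placeholders.
#
#   $ export CARTO_API_KEY=... CARTO_USER=... WAZE_API_URL=... WAZE_TKN=... \
#            WAZE_PARTNER=... WAZE_POLY=... TRAFFICO_PREFIX=...
#   $ python -c "from connector.config import Config; print(Config.WAZE_GEORSS)"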
|
[
"os.environ.get"
] |
[((243, 274), 'os.environ.get', 'os.environ.get', (['"""CARTO_API_KEY"""'], {}), "('CARTO_API_KEY')\n", (257, 274), False, 'import os\n'), ((292, 320), 'os.environ.get', 'os.environ.get', (['"""CARTO_USER"""'], {}), "('CARTO_USER')\n", (306, 320), False, 'import os\n'), ((357, 387), 'os.environ.get', 'os.environ.get', (['"""WAZE_API_URL"""'], {}), "('WAZE_API_URL')\n", (371, 387), False, 'import os\n'), ((404, 430), 'os.environ.get', 'os.environ.get', (['"""WAZE_TKN"""'], {}), "('WAZE_TKN')\n", (418, 430), False, 'import os\n'), ((456, 486), 'os.environ.get', 'os.environ.get', (['"""WAZE_PARTNER"""'], {}), "('WAZE_PARTNER')\n", (470, 486), False, 'import os\n'), ((578, 605), 'os.environ.get', 'os.environ.get', (['"""WAZE_POLY"""'], {}), "('WAZE_POLY')\n", (592, 605), False, 'import os\n'), ((804, 837), 'os.environ.get', 'os.environ.get', (['"""TRAFFICO_PREFIX"""'], {}), "('TRAFFICO_PREFIX')\n", (818, 837), False, 'import os\n')]
|
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from dccw.single_palette_sorter import *
class ComprehensiveSinglePaletteSorter:
def __init__(self, palette, target_spaces):
# palette: ColorPalette Object
# target_space: ['rgb', 'hsl', 'hsv', 'lab', 'lch']
self.standard_sorter = SinglePaletteSorter(palette)
self.palette_sorters = {}
for target_space in target_spaces:
self.palette_sorters[target_space] = SinglePaletteSorter(palette, target_space)
#================
# Sort Functions
#================
def lex_sort(self):
sorted_result = {}
for target_space, sorter in self.palette_sorters.items():
sorted_result[target_space.lower()] = sorter.lex_sort()
return sorted_result
def standard_sort(self):
return self.standard_sorter.standard_sort()
|
[
"os.path.dirname"
] |
[((69, 94), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (84, 94), False, 'import os\n')]
|
# Code generated by lark_sdk_gen. DO NOT EDIT.
import unittest
import pylark
import pytest
from tests.test_conf import app_all_permission, app_no_permission
from tests.test_helper import mock_get_tenant_access_token_failed
def mock(*args, **kwargs):
raise pylark.PyLarkError(scope="scope", func="func", code=1, msg="mock-failed")
def mock_raw_request(*args, **kwargs):
raise pylark.PyLarkError(
scope="scope", func="func", code=1, msg="mock-raw-request-failed"
)
# mock get token
class TestMessageSampleMockGetTokenFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMessageSampleMockGetTokenFailed, self).__init__(*args, **kwargs)
self.cli = app_all_permission.ins()
self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed
self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed
self.module_cli = self.cli.message
def test_mock_get_token_send_ephemeral_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_ephemeral_message(pylark.SendEphemeralMessageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_send_urgent_app_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_urgent_app_message(pylark.SendUrgentAppMessageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_send_urgent_sms_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_urgent_sms_message(pylark.SendUrgentSmsMessageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_send_urgent_phone_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_urgent_phone_message(
pylark.SendUrgentPhoneMessageReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_send_raw_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_raw_message(pylark.SendRawMessageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_send_raw_message_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_raw_message_old(pylark.SendRawMessageOldReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_send_old_raw_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_send_old_raw_message(
pylark.BatchSendOldRawMessageReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_reply_raw_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.reply_raw_message(pylark.ReplyRawMessageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_message(pylark.DeleteMessageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_delete_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_message(pylark.BatchDeleteMessageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_message(pylark.UpdateMessageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_message_read_user_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message_read_user_list(
pylark.GetMessageReadUserListReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_batch_sent_message_read_user(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_batch_sent_message_read_user(
pylark.GetBatchSentMessageReadUserReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_message_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message_list(pylark.GetMessageListReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_message_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message_file(pylark.GetMessageFileReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message(pylark.GetMessageReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_ephemeral_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_ephemeral_message(pylark.DeleteEphemeralMessageReq())
assert "msg=failed" in f"{e}"
# mock mock self func
class TestMessageSampleMockSelfFuncFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMessageSampleMockSelfFuncFailed, self).__init__(*args, **kwargs)
self.cli = app_all_permission.ins()
self.module_cli = self.cli.message
def test_mock_self_func_send_ephemeral_message(self):
origin_func = self.module_cli.send_ephemeral_message
self.module_cli.send_ephemeral_message = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_ephemeral_message(pylark.SendEphemeralMessageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.send_ephemeral_message = origin_func
def test_mock_self_func_send_urgent_app_message(self):
origin_func = self.module_cli.send_urgent_app_message
self.module_cli.send_urgent_app_message = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_urgent_app_message(pylark.SendUrgentAppMessageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.send_urgent_app_message = origin_func
def test_mock_self_func_send_urgent_sms_message(self):
origin_func = self.module_cli.send_urgent_sms_message
self.module_cli.send_urgent_sms_message = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_urgent_sms_message(pylark.SendUrgentSmsMessageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.send_urgent_sms_message = origin_func
def test_mock_self_func_send_urgent_phone_message(self):
origin_func = self.module_cli.send_urgent_phone_message
self.module_cli.send_urgent_phone_message = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_urgent_phone_message(
pylark.SendUrgentPhoneMessageReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.send_urgent_phone_message = origin_func
def test_mock_self_func_send_raw_message(self):
origin_func = self.module_cli.send_raw_message
self.module_cli.send_raw_message = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_raw_message(pylark.SendRawMessageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.send_raw_message = origin_func
def test_mock_self_func_send_raw_message_old(self):
origin_func = self.module_cli.send_raw_message_old
self.module_cli.send_raw_message_old = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_raw_message_old(pylark.SendRawMessageOldReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.send_raw_message_old = origin_func
def test_mock_self_func_batch_send_old_raw_message(self):
origin_func = self.module_cli.batch_send_old_raw_message
self.module_cli.batch_send_old_raw_message = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_send_old_raw_message(
pylark.BatchSendOldRawMessageReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_send_old_raw_message = origin_func
def test_mock_self_func_reply_raw_message(self):
origin_func = self.module_cli.reply_raw_message
self.module_cli.reply_raw_message = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.reply_raw_message(pylark.ReplyRawMessageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.reply_raw_message = origin_func
def test_mock_self_func_delete_message(self):
origin_func = self.module_cli.delete_message
self.module_cli.delete_message = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_message(pylark.DeleteMessageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_message = origin_func
def test_mock_self_func_batch_delete_message(self):
origin_func = self.module_cli.batch_delete_message
self.module_cli.batch_delete_message = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_message(pylark.BatchDeleteMessageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_delete_message = origin_func
def test_mock_self_func_update_message(self):
origin_func = self.module_cli.update_message
self.module_cli.update_message = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_message(pylark.UpdateMessageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_message = origin_func
def test_mock_self_func_get_message_read_user_list(self):
origin_func = self.module_cli.get_message_read_user_list
self.module_cli.get_message_read_user_list = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message_read_user_list(
pylark.GetMessageReadUserListReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_message_read_user_list = origin_func
def test_mock_self_func_get_batch_sent_message_read_user(self):
origin_func = self.module_cli.get_batch_sent_message_read_user
self.module_cli.get_batch_sent_message_read_user = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_batch_sent_message_read_user(
pylark.GetBatchSentMessageReadUserReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_batch_sent_message_read_user = origin_func
def test_mock_self_func_get_message_list(self):
origin_func = self.module_cli.get_message_list
self.module_cli.get_message_list = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message_list(pylark.GetMessageListReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_message_list = origin_func
def test_mock_self_func_get_message_file(self):
origin_func = self.module_cli.get_message_file
self.module_cli.get_message_file = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message_file(pylark.GetMessageFileReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_message_file = origin_func
def test_mock_self_func_get_message(self):
origin_func = self.module_cli.get_message
self.module_cli.get_message = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message(pylark.GetMessageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_message = origin_func
def test_mock_self_func_delete_ephemeral_message(self):
origin_func = self.module_cli.delete_ephemeral_message
self.module_cli.delete_ephemeral_message = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_ephemeral_message(pylark.DeleteEphemeralMessageReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_ephemeral_message = origin_func
# mock raw request
class TestMessageSampleMockRawRequestFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMessageSampleMockRawRequestFailed, self).__init__(*args, **kwargs)
self.cli = app_all_permission.ins()
self.module_cli = self.cli.message
self.cli.raw_request = mock_raw_request
def test_mock_raw_request_send_ephemeral_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_ephemeral_message(pylark.SendEphemeralMessageReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_send_urgent_app_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_urgent_app_message(
pylark.SendUrgentAppMessageReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_send_urgent_sms_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_urgent_sms_message(
pylark.SendUrgentSmsMessageReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_send_urgent_phone_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_urgent_phone_message(
pylark.SendUrgentPhoneMessageReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_send_raw_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_raw_message(pylark.SendRawMessageReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_send_raw_message_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_raw_message_old(pylark.SendRawMessageOldReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_send_old_raw_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_send_old_raw_message(
pylark.BatchSendOldRawMessageReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_reply_raw_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.reply_raw_message(
pylark.ReplyRawMessageReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_message(
pylark.DeleteMessageReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_delete_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_message(
pylark.BatchDeleteMessageReq(
batch_message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_message(
pylark.UpdateMessageReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_message_read_user_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message_read_user_list(
pylark.GetMessageReadUserListReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_batch_sent_message_read_user(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_batch_sent_message_read_user(
pylark.GetBatchSentMessageReadUserReq(
batch_message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_message_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message_list(pylark.GetMessageListReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_message_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message_file(
pylark.GetMessageFileReq(
message_id="x",
file_key="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message(
pylark.GetMessageReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_ephemeral_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_ephemeral_message(pylark.DeleteEphemeralMessageReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
# real request
class TestMessageSampleRealRequestFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMessageSampleRealRequestFailed, self).__init__(*args, **kwargs)
self.cli = app_no_permission.ins()
self.module_cli = self.cli.message
def test_real_request_send_ephemeral_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_ephemeral_message(pylark.SendEphemeralMessageReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_send_urgent_app_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_urgent_app_message(
pylark.SendUrgentAppMessageReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_send_urgent_sms_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_urgent_sms_message(
pylark.SendUrgentSmsMessageReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_send_urgent_phone_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_urgent_phone_message(
pylark.SendUrgentPhoneMessageReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_send_raw_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_raw_message(pylark.SendRawMessageReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_send_raw_message_old(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.send_raw_message_old(pylark.SendRawMessageOldReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_send_old_raw_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_send_old_raw_message(
pylark.BatchSendOldRawMessageReq()
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_reply_raw_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.reply_raw_message(
pylark.ReplyRawMessageReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_message(
pylark.DeleteMessageReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_delete_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_message(
pylark.BatchDeleteMessageReq(
batch_message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_message(
pylark.UpdateMessageReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_message_read_user_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message_read_user_list(
pylark.GetMessageReadUserListReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_batch_sent_message_read_user(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_batch_sent_message_read_user(
pylark.GetBatchSentMessageReadUserReq(
batch_message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_message_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message_list(pylark.GetMessageListReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_message_file(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message_file(
pylark.GetMessageFileReq(
message_id="x",
file_key="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_message(
pylark.GetMessageReq(
message_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_ephemeral_message(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_ephemeral_message(pylark.DeleteEphemeralMessageReq())
assert e.type is pylark.PyLarkError
assert e.value.code > 0
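# --- Note (added, not part of the generated test file) ---
# These generated tests are meant to be collected with pytest, e.g.:
#     python -m pytest tests/ -k MessageSample
# The "real request" cases presumably need valid app settings in
# tests/test_conf.py, since app_no_permission.ins() issues actual API calls.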
|
[
"pylark.SendUrgentPhoneMessageReq",
"pylark.SendUrgentSmsMessageReq",
"pylark.GetBatchSentMessageReadUserReq",
"pylark.DeleteEphemeralMessageReq",
"pylark.UpdateMessageReq",
"pylark.SendRawMessageOldReq",
"pylark.PyLarkError",
"pylark.SendUrgentAppMessageReq",
"pylark.SendEphemeralMessageReq",
"pytest.raises",
"pylark.GetMessageFileReq",
"pylark.DeleteMessageReq",
"pylark.BatchDeleteMessageReq",
"pylark.GetMessageReq",
"tests.test_conf.app_all_permission.ins",
"pylark.SendRawMessageReq",
"pylark.BatchSendOldRawMessageReq",
"pylark.GetMessageReadUserListReq",
"pylark.GetMessageListReq",
"pylark.ReplyRawMessageReq",
"tests.test_conf.app_no_permission.ins"
] |
[((263, 336), 'pylark.PyLarkError', 'pylark.PyLarkError', ([], {'scope': '"""scope"""', 'func': '"""func"""', 'code': '(1)', 'msg': '"""mock-failed"""'}), "(scope='scope', func='func', code=1, msg='mock-failed')\n", (281, 336), False, 'import pylark\n'), ((388, 478), 'pylark.PyLarkError', 'pylark.PyLarkError', ([], {'scope': '"""scope"""', 'func': '"""func"""', 'code': '(1)', 'msg': '"""mock-raw-request-failed"""'}), "(scope='scope', func='func', code=1, msg=\n 'mock-raw-request-failed')\n", (406, 478), False, 'import pylark\n'), ((712, 736), 'tests.test_conf.app_all_permission.ins', 'app_all_permission.ins', ([], {}), '()\n', (734, 736), False, 'from tests.test_conf import app_all_permission, app_no_permission\n'), ((5211, 5235), 'tests.test_conf.app_all_permission.ins', 'app_all_permission.ins', ([], {}), '()\n', (5233, 5235), False, 'from tests.test_conf import app_all_permission, app_no_permission\n'), ((12553, 12577), 'tests.test_conf.app_all_permission.ins', 'app_all_permission.ins', ([], {}), '()\n', (12575, 12577), False, 'from tests.test_conf import app_all_permission, app_no_permission\n'), ((19427, 19450), 'tests.test_conf.app_no_permission.ins', 'app_no_permission.ins', ([], {}), '()\n', (19448, 19450), False, 'from tests.test_conf import app_all_permission, app_no_permission\n'), ((1017, 1050), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (1030, 1050), False, 'import pytest\n'), ((1254, 1287), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (1267, 1287), False, 'import pytest\n'), ((1492, 1525), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (1505, 1525), False, 'import pytest\n'), ((1732, 1765), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (1745, 1765), False, 'import pytest\n'), ((1997, 2030), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (2010, 2030), False, 'import pytest\n'), ((2219, 2252), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (2232, 2252), False, 'import pytest\n'), ((2454, 2487), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (2467, 2487), False, 'import pytest\n'), ((2721, 2754), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (2734, 2754), False, 'import pytest\n'), ((2939, 2972), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (2952, 2972), False, 'import pytest\n'), ((3158, 3191), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (3171, 3191), False, 'import pytest\n'), ((3382, 3415), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (3395, 3415), False, 'import pytest\n'), ((3607, 3640), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (3620, 3640), False, 'import pytest\n'), ((3889, 3922), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (3902, 3922), False, 'import pytest\n'), ((4166, 4199), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (4179, 4199), False, 'import pytest\n'), ((4384, 4417), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (4397, 4417), False, 'import pytest\n'), ((4597, 4630), 'pytest.raises', 'pytest.raises', 
(['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (4610, 4630), False, 'import pytest\n'), ((4814, 4847), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (4827, 4847), False, 'import pytest\n'), ((5467, 5500), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (5480, 5500), False, 'import pytest\n'), ((5888, 5921), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (5901, 5921), False, 'import pytest\n'), ((6311, 6344), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (6324, 6344), False, 'import pytest\n'), ((6740, 6773), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (6753, 6773), False, 'import pytest\n'), ((7178, 7211), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (7191, 7211), False, 'import pytest\n'), ((7572, 7605), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (7585, 7605), False, 'import pytest\n'), ((7995, 8028), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (8008, 8028), False, 'import pytest\n'), ((8438, 8471), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (8451, 8471), False, 'import pytest\n'), ((8817, 8850), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (8830, 8850), False, 'import pytest\n'), ((9206, 9239), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (9219, 9239), False, 'import pytest\n'), ((9594, 9627), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (9607, 9627), False, 'import pytest\n'), ((10001, 10034), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (10014, 10034), False, 'import pytest\n'), ((10489, 10522), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (10502, 10522), False, 'import pytest\n'), ((10946, 10979), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (10959, 10979), False, 'import pytest\n'), ((11328, 11361), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (11341, 11361), False, 'import pytest\n'), ((11695, 11728), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (11708, 11728), False, 'import pytest\n'), ((12087, 12120), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (12100, 12120), False, 'import pytest\n'), ((12743, 12776), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (12756, 12776), False, 'import pytest\n'), ((13076, 13109), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (13089, 13109), False, 'import pytest\n'), ((13493, 13526), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (13506, 13526), False, 'import pytest\n'), ((13912, 13945), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (13925, 13945), False, 'import pytest\n'), ((14326, 14359), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (14339, 14359), False, 'import pytest\n'), ((14644, 14677), 'pytest.raises', 'pytest.raises', 
(['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (14657, 14677), False, 'import pytest\n'), ((14975, 15008), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (14988, 15008), False, 'import pytest\n'), ((15338, 15371), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (15351, 15371), False, 'import pytest\n'), ((15735, 15768), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (15748, 15768), False, 'import pytest\n'), ((16133, 16166), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (16146, 16166), False, 'import pytest\n'), ((16542, 16575), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (16555, 16575), False, 'import pytest\n'), ((16946, 16979), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (16959, 16979), False, 'import pytest\n'), ((17377, 17410), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (17390, 17410), False, 'import pytest\n'), ((17809, 17842), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (17822, 17842), False, 'import pytest\n'), ((18123, 18156), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (18136, 18156), False, 'import pytest\n'), ((18549, 18582), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (18562, 18582), False, 'import pytest\n'), ((18945, 18978), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (18958, 18978), False, 'import pytest\n'), ((19564, 19597), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (19577, 19597), False, 'import pytest\n'), ((19837, 19870), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (19850, 19870), False, 'import pytest\n'), ((20194, 20227), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (20207, 20227), False, 'import pytest\n'), ((20553, 20586), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (20566, 20586), False, 'import pytest\n'), ((20907, 20940), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (20920, 20940), False, 'import pytest\n'), ((21165, 21198), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (21178, 21198), False, 'import pytest\n'), ((21436, 21469), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (21449, 21469), False, 'import pytest\n'), ((21739, 21772), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (21752, 21772), False, 'import pytest\n'), ((22076, 22109), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (22089, 22109), False, 'import pytest\n'), ((22414, 22447), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (22427, 22447), False, 'import pytest\n'), ((22763, 22796), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (22776, 22796), False, 'import pytest\n'), ((23107, 23140), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (23120, 23140), False, 'import pytest\n'), 
((23478, 23511), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (23491, 23511), False, 'import pytest\n'), ((23850, 23883), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (23863, 23883), False, 'import pytest\n'), ((24104, 24137), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (24117, 24137), False, 'import pytest\n'), ((24470, 24503), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (24483, 24503), False, 'import pytest\n'), ((24806, 24839), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (24819, 24839), False, 'import pytest\n'), ((1108, 1140), 'pylark.SendEphemeralMessageReq', 'pylark.SendEphemeralMessageReq', ([], {}), '()\n', (1138, 1140), False, 'import pylark\n'), ((1346, 1378), 'pylark.SendUrgentAppMessageReq', 'pylark.SendUrgentAppMessageReq', ([], {}), '()\n', (1376, 1378), False, 'import pylark\n'), ((1584, 1616), 'pylark.SendUrgentSmsMessageReq', 'pylark.SendUrgentSmsMessageReq', ([], {}), '()\n', (1614, 1616), False, 'import pylark\n'), ((1843, 1877), 'pylark.SendUrgentPhoneMessageReq', 'pylark.SendUrgentPhoneMessageReq', ([], {}), '()\n', (1875, 1877), False, 'import pylark\n'), ((2082, 2108), 'pylark.SendRawMessageReq', 'pylark.SendRawMessageReq', ([], {}), '()\n', (2106, 2108), False, 'import pylark\n'), ((2308, 2337), 'pylark.SendRawMessageOldReq', 'pylark.SendRawMessageOldReq', ([], {}), '()\n', (2335, 2337), False, 'import pylark\n'), ((2566, 2600), 'pylark.BatchSendOldRawMessageReq', 'pylark.BatchSendOldRawMessageReq', ([], {}), '()\n', (2598, 2600), False, 'import pylark\n'), ((2807, 2834), 'pylark.ReplyRawMessageReq', 'pylark.ReplyRawMessageReq', ([], {}), '()\n', (2832, 2834), False, 'import pylark\n'), ((3022, 3047), 'pylark.DeleteMessageReq', 'pylark.DeleteMessageReq', ([], {}), '()\n', (3045, 3047), False, 'import pylark\n'), ((3247, 3277), 'pylark.BatchDeleteMessageReq', 'pylark.BatchDeleteMessageReq', ([], {}), '()\n', (3275, 3277), False, 'import pylark\n'), ((3465, 3490), 'pylark.UpdateMessageReq', 'pylark.UpdateMessageReq', ([], {}), '()\n', (3488, 3490), False, 'import pylark\n'), ((3719, 3753), 'pylark.GetMessageReadUserListReq', 'pylark.GetMessageReadUserListReq', ([], {}), '()\n', (3751, 3753), False, 'import pylark\n'), ((4007, 4046), 'pylark.GetBatchSentMessageReadUserReq', 'pylark.GetBatchSentMessageReadUserReq', ([], {}), '()\n', (4044, 4046), False, 'import pylark\n'), ((4251, 4277), 'pylark.GetMessageListReq', 'pylark.GetMessageListReq', ([], {}), '()\n', (4275, 4277), False, 'import pylark\n'), ((4469, 4495), 'pylark.GetMessageFileReq', 'pylark.GetMessageFileReq', ([], {}), '()\n', (4493, 4495), False, 'import pylark\n'), ((4677, 4699), 'pylark.GetMessageReq', 'pylark.GetMessageReq', ([], {}), '()\n', (4697, 4699), False, 'import pylark\n'), ((4907, 4941), 'pylark.DeleteEphemeralMessageReq', 'pylark.DeleteEphemeralMessageReq', ([], {}), '()\n', (4939, 4941), False, 'import pylark\n'), ((5558, 5590), 'pylark.SendEphemeralMessageReq', 'pylark.SendEphemeralMessageReq', ([], {}), '()\n', (5588, 5590), False, 'import pylark\n'), ((5980, 6012), 'pylark.SendUrgentAppMessageReq', 'pylark.SendUrgentAppMessageReq', ([], {}), '()\n', (6010, 6012), False, 'import pylark\n'), ((6403, 6435), 'pylark.SendUrgentSmsMessageReq', 'pylark.SendUrgentSmsMessageReq', ([], {}), '()\n', (6433, 6435), False, 'import pylark\n'), ((6851, 6885), 
'pylark.SendUrgentPhoneMessageReq', 'pylark.SendUrgentPhoneMessageReq', ([], {}), '()\n', (6883, 6885), False, 'import pylark\n'), ((7263, 7289), 'pylark.SendRawMessageReq', 'pylark.SendRawMessageReq', ([], {}), '()\n', (7287, 7289), False, 'import pylark\n'), ((7661, 7690), 'pylark.SendRawMessageOldReq', 'pylark.SendRawMessageOldReq', ([], {}), '()\n', (7688, 7690), False, 'import pylark\n'), ((8107, 8141), 'pylark.BatchSendOldRawMessageReq', 'pylark.BatchSendOldRawMessageReq', ([], {}), '()\n', (8139, 8141), False, 'import pylark\n'), ((8524, 8551), 'pylark.ReplyRawMessageReq', 'pylark.ReplyRawMessageReq', ([], {}), '()\n', (8549, 8551), False, 'import pylark\n'), ((8900, 8925), 'pylark.DeleteMessageReq', 'pylark.DeleteMessageReq', ([], {}), '()\n', (8923, 8925), False, 'import pylark\n'), ((9295, 9325), 'pylark.BatchDeleteMessageReq', 'pylark.BatchDeleteMessageReq', ([], {}), '()\n', (9323, 9325), False, 'import pylark\n'), ((9677, 9702), 'pylark.UpdateMessageReq', 'pylark.UpdateMessageReq', ([], {}), '()\n', (9700, 9702), False, 'import pylark\n'), ((10113, 10147), 'pylark.GetMessageReadUserListReq', 'pylark.GetMessageReadUserListReq', ([], {}), '()\n', (10145, 10147), False, 'import pylark\n'), ((10607, 10646), 'pylark.GetBatchSentMessageReadUserReq', 'pylark.GetBatchSentMessageReadUserReq', ([], {}), '()\n', (10644, 10646), False, 'import pylark\n'), ((11031, 11057), 'pylark.GetMessageListReq', 'pylark.GetMessageListReq', ([], {}), '()\n', (11055, 11057), False, 'import pylark\n'), ((11413, 11439), 'pylark.GetMessageFileReq', 'pylark.GetMessageFileReq', ([], {}), '()\n', (11437, 11439), False, 'import pylark\n'), ((11775, 11797), 'pylark.GetMessageReq', 'pylark.GetMessageReq', ([], {}), '()\n', (11795, 11797), False, 'import pylark\n'), ((12180, 12214), 'pylark.DeleteEphemeralMessageReq', 'pylark.DeleteEphemeralMessageReq', ([], {}), '()\n', (12212, 12214), False, 'import pylark\n'), ((12834, 12866), 'pylark.SendEphemeralMessageReq', 'pylark.SendEphemeralMessageReq', ([], {}), '()\n', (12864, 12866), False, 'import pylark\n'), ((13185, 13231), 'pylark.SendUrgentAppMessageReq', 'pylark.SendUrgentAppMessageReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (13215, 13231), False, 'import pylark\n'), ((13602, 13648), 'pylark.SendUrgentSmsMessageReq', 'pylark.SendUrgentSmsMessageReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (13632, 13648), False, 'import pylark\n'), ((14023, 14071), 'pylark.SendUrgentPhoneMessageReq', 'pylark.SendUrgentPhoneMessageReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (14055, 14071), False, 'import pylark\n'), ((14411, 14437), 'pylark.SendRawMessageReq', 'pylark.SendRawMessageReq', ([], {}), '()\n', (14435, 14437), False, 'import pylark\n'), ((14733, 14762), 'pylark.SendRawMessageOldReq', 'pylark.SendRawMessageOldReq', ([], {}), '()\n', (14760, 14762), False, 'import pylark\n'), ((15087, 15121), 'pylark.BatchSendOldRawMessageReq', 'pylark.BatchSendOldRawMessageReq', ([], {}), '()\n', (15119, 15121), False, 'import pylark\n'), ((15441, 15482), 'pylark.ReplyRawMessageReq', 'pylark.ReplyRawMessageReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (15466, 15482), False, 'import pylark\n'), ((15835, 15874), 'pylark.DeleteMessageReq', 'pylark.DeleteMessageReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (15858, 15874), False, 'import pylark\n'), ((16239, 16289), 'pylark.BatchDeleteMessageReq', 'pylark.BatchDeleteMessageReq', ([], {'batch_message_id': '"""x"""'}), "(batch_message_id='x')\n", (16267, 16289), 
False, 'import pylark\n'), ((16642, 16681), 'pylark.UpdateMessageReq', 'pylark.UpdateMessageReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (16665, 16681), False, 'import pylark\n'), ((17058, 17106), 'pylark.GetMessageReadUserListReq', 'pylark.GetMessageReadUserListReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (17090, 17106), False, 'import pylark\n'), ((17495, 17554), 'pylark.GetBatchSentMessageReadUserReq', 'pylark.GetBatchSentMessageReadUserReq', ([], {'batch_message_id': '"""x"""'}), "(batch_message_id='x')\n", (17532, 17554), False, 'import pylark\n'), ((17894, 17920), 'pylark.GetMessageListReq', 'pylark.GetMessageListReq', ([], {}), '()\n', (17918, 17920), False, 'import pylark\n'), ((18225, 18279), 'pylark.GetMessageFileReq', 'pylark.GetMessageFileReq', ([], {'message_id': '"""x"""', 'file_key': '"""x"""'}), "(message_id='x', file_key='x')\n", (18249, 18279), False, 'import pylark\n'), ((18646, 18682), 'pylark.GetMessageReq', 'pylark.GetMessageReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (18666, 18682), False, 'import pylark\n'), ((19038, 19072), 'pylark.DeleteEphemeralMessageReq', 'pylark.DeleteEphemeralMessageReq', ([], {}), '()\n', (19070, 19072), False, 'import pylark\n'), ((19655, 19687), 'pylark.SendEphemeralMessageReq', 'pylark.SendEphemeralMessageReq', ([], {}), '()\n', (19685, 19687), False, 'import pylark\n'), ((19946, 19992), 'pylark.SendUrgentAppMessageReq', 'pylark.SendUrgentAppMessageReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (19976, 19992), False, 'import pylark\n'), ((20303, 20349), 'pylark.SendUrgentSmsMessageReq', 'pylark.SendUrgentSmsMessageReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (20333, 20349), False, 'import pylark\n'), ((20664, 20712), 'pylark.SendUrgentPhoneMessageReq', 'pylark.SendUrgentPhoneMessageReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (20696, 20712), False, 'import pylark\n'), ((20992, 21018), 'pylark.SendRawMessageReq', 'pylark.SendRawMessageReq', ([], {}), '()\n', (21016, 21018), False, 'import pylark\n'), ((21254, 21283), 'pylark.SendRawMessageOldReq', 'pylark.SendRawMessageOldReq', ([], {}), '()\n', (21281, 21283), False, 'import pylark\n'), ((21548, 21582), 'pylark.BatchSendOldRawMessageReq', 'pylark.BatchSendOldRawMessageReq', ([], {}), '()\n', (21580, 21582), False, 'import pylark\n'), ((21842, 21883), 'pylark.ReplyRawMessageReq', 'pylark.ReplyRawMessageReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (21867, 21883), False, 'import pylark\n'), ((22176, 22215), 'pylark.DeleteMessageReq', 'pylark.DeleteMessageReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (22199, 22215), False, 'import pylark\n'), ((22520, 22570), 'pylark.BatchDeleteMessageReq', 'pylark.BatchDeleteMessageReq', ([], {'batch_message_id': '"""x"""'}), "(batch_message_id='x')\n", (22548, 22570), False, 'import pylark\n'), ((22863, 22902), 'pylark.UpdateMessageReq', 'pylark.UpdateMessageReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (22886, 22902), False, 'import pylark\n'), ((23219, 23267), 'pylark.GetMessageReadUserListReq', 'pylark.GetMessageReadUserListReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (23251, 23267), False, 'import pylark\n'), ((23596, 23655), 'pylark.GetBatchSentMessageReadUserReq', 'pylark.GetBatchSentMessageReadUserReq', ([], {'batch_message_id': '"""x"""'}), "(batch_message_id='x')\n", (23633, 23655), False, 'import pylark\n'), ((23935, 23961), 'pylark.GetMessageListReq', 'pylark.GetMessageListReq', ([], {}), 
'()\n', (23959, 23961), False, 'import pylark\n'), ((24206, 24260), 'pylark.GetMessageFileReq', 'pylark.GetMessageFileReq', ([], {'message_id': '"""x"""', 'file_key': '"""x"""'}), "(message_id='x', file_key='x')\n", (24230, 24260), False, 'import pylark\n'), ((24567, 24603), 'pylark.GetMessageReq', 'pylark.GetMessageReq', ([], {'message_id': '"""x"""'}), "(message_id='x')\n", (24587, 24603), False, 'import pylark\n'), ((24899, 24933), 'pylark.DeleteEphemeralMessageReq', 'pylark.DeleteEphemeralMessageReq', ([], {}), '()\n', (24931, 24933), False, 'import pylark\n')]
|
from typing import NamedTuple
from urllib.parse import (
urlsplit,
urlunsplit,
)
class URL(NamedTuple):
scheme: str
host: str
path: str
query: str
fragment: str
@classmethod
def from_string(cls, url_string: str) -> 'URL':
split_result = urlsplit(url_string)
return cls(
scheme=split_result.scheme,
host=split_result.netloc,
path=split_result.path,
query=split_result.query,
fragment=split_result.fragment,
)
def to_string(self) -> str:
return urlunsplit(
(
self.scheme,
self.host,
self.path,
self.query,
self.fragment,
)
)
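# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of the round trip through URL.from_string / to_string;
# the example address is invented purely for illustration.
if __name__ == '__main__':
    url = URL.from_string('https://example.com/path?q=1#frag')
    assert url.host == 'example.com'
    assert url.to_string() == 'https://example.com/path?q=1#frag'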
|
[
"urllib.parse.urlsplit",
"urllib.parse.urlunsplit"
] |
[((284, 304), 'urllib.parse.urlsplit', 'urlsplit', (['url_string'], {}), '(url_string)\n', (292, 304), False, 'from urllib.parse import urlsplit, urlunsplit\n'), ((579, 653), 'urllib.parse.urlunsplit', 'urlunsplit', (['(self.scheme, self.host, self.path, self.query, self.fragment)'], {}), '((self.scheme, self.host, self.path, self.query, self.fragment))\n', (589, 653), False, 'from urllib.parse import urlsplit, urlunsplit\n')]
|
import pygame
import random
import numpy as np
from collections import deque
import tensorflow as tf # http://blog.topspeedsnail.com/archives/10116
import cv2 # http://blog.topspeedsnail.com/archives/4755
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
SCREEN_SIZE = [320, 400]
BAR_SIZE = [50, 5]
BALL_SIZE = [15, 15]
# Outputs of the neural network (one-hot action encodings)
MOVE_STAY = [1, 0, 0]
MOVE_LEFT = [0, 1, 0]
MOVE_RIGHT = [0, 0, 1]
class Game(object):
def __init__(self):
pygame.init()
self.clock = pygame.time.Clock()
self.screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption('Simple Game')
self.ball_pos_x = SCREEN_SIZE[0] // 2 - BALL_SIZE[0] / 2
self.ball_pos_y = SCREEN_SIZE[1] // 2 - BALL_SIZE[1] / 2
self.ball_dir_x = -1 # -1 = left 1 = right
self.ball_dir_y = -1 # -1 = up 1 = down
self.ball_pos = pygame.Rect(self.ball_pos_x, self.ball_pos_y, BALL_SIZE[0], BALL_SIZE[1])
self.bar_pos_x = SCREEN_SIZE[0] // 2 - BAR_SIZE[0] // 2
self.bar_pos = pygame.Rect(self.bar_pos_x, SCREEN_SIZE[1] - BAR_SIZE[1], BAR_SIZE[0], BAR_SIZE[1])
    # action is one of MOVE_STAY, MOVE_LEFT, MOVE_RIGHT.
    # The AI moves the bar left or right; this returns the game screen pixels and the
    # corresponding reward (pixels -> reward -> reinforce moving the bar toward higher reward).
def step(self, action):
if action == MOVE_LEFT:
self.bar_pos_x = self.bar_pos_x - 2
elif action == MOVE_RIGHT:
self.bar_pos_x = self.bar_pos_x + 2
else:
pass
if self.bar_pos_x < 0:
self.bar_pos_x = 0
if self.bar_pos_x > SCREEN_SIZE[0] - BAR_SIZE[0]:
self.bar_pos_x = SCREEN_SIZE[0] - BAR_SIZE[0]
self.screen.fill(BLACK)
self.bar_pos.left = self.bar_pos_x
pygame.draw.rect(self.screen, WHITE, self.bar_pos)
self.ball_pos.left += self.ball_dir_x * 2
self.ball_pos.bottom += self.ball_dir_y * 3
pygame.draw.rect(self.screen, WHITE, self.ball_pos)
if self.ball_pos.top <= 0 or self.ball_pos.bottom >= (SCREEN_SIZE[1] - BAR_SIZE[1] + 1):
self.ball_dir_y = self.ball_dir_y * -1
if self.ball_pos.left <= 0 or self.ball_pos.right >= (SCREEN_SIZE[0]):
self.ball_dir_x = self.ball_dir_x * -1
reward = 0
if self.bar_pos.top <= self.ball_pos.bottom and (self.bar_pos.left < self.ball_pos.right and self.bar_pos.right > self.ball_pos.left):
            reward = 1  # reward for hitting the ball
elif self.bar_pos.top <= self.ball_pos.bottom and (self.bar_pos.left > self.ball_pos.right or self.bar_pos.right < self.ball_pos.left):
            reward = -1  # penalty for missing the ball
        # grab the game screen pixels
screen_image = pygame.surfarray.array3d(pygame.display.get_surface())
pygame.display.update()
        # return the reward and the game screen pixels
return reward, screen_image
# Discount factor used in the Q-learning target (the Adam learning rate is set separately below)
LEARNING_RATE = 0.99
# Epsilon-greedy exploration: initial and final exploration rates
INITIAL_EPSILON = 1.0
FINAL_EPSILON = 0.05
# Steps over which epsilon is annealed, and observation steps before training starts
EXPLORE = 500000
OBSERVE = 50000
# Capacity of the experience replay memory
REPLAY_MEMORY = 500000
BATCH = 100
output = 3  # number of output neurons, one per action - MOVE_STAY:[1, 0, 0] MOVE_LEFT:[0, 1, 0] MOVE_RIGHT:[0, 0, 1]
input_image = tf.placeholder("float", [None, 80, 100, 4])  # game screen pixels
action = tf.placeholder("float", [None, output])  # action
CHECKPOINT_PATH = r'E:/workspace/ai/gomoku/playground/dqn_example/checkpoints/ckpt'
# Define the CNN (convolutional neural network); reference: http://blog.topspeedsnail.com/archives/10451
def convolutional_neural_network(input_image):
weights = {'w_conv1': tf.Variable(tf.zeros([8, 8, 4, 32])),
'w_conv2': tf.Variable(tf.zeros([4, 4, 32, 64])),
'w_conv3': tf.Variable(tf.zeros([3, 3, 64, 64])),
'w_fc4': tf.Variable(tf.zeros([3456, 784])),
'w_out': tf.Variable(tf.zeros([784, output]))}
biases = {'b_conv1': tf.Variable(tf.zeros([32])),
'b_conv2': tf.Variable(tf.zeros([64])),
'b_conv3': tf.Variable(tf.zeros([64])),
'b_fc4': tf.Variable(tf.zeros([784])),
'b_out': tf.Variable(tf.zeros([output]))}
conv1 = tf.nn.relu(tf.nn.conv2d(input_image, weights['w_conv1'], strides=[1, 4, 4, 1], padding="VALID") + biases['b_conv1'])
conv2 = tf.nn.relu(tf.nn.conv2d(conv1, weights['w_conv2'], strides=[1, 2, 2, 1], padding="VALID") + biases['b_conv2'])
conv3 = tf.nn.relu(tf.nn.conv2d(conv2, weights['w_conv3'], strides=[1, 1, 1, 1], padding="VALID") + biases['b_conv3'])
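    # With VALID padding the feature maps shrink 80x100 -> 19x24 -> 8x11 -> 6x9,
    # so the flattened conv3 output has 6 * 9 * 64 = 3456 values per example.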
conv3_flat = tf.reshape(conv3, [-1, 3456])
fc4 = tf.nn.relu(tf.matmul(conv3_flat, weights['w_fc4']) + biases['b_fc4'])
output_layer = tf.matmul(fc4, weights['w_out']) + biases['b_out']
return output_layer
# Introduction to deep reinforcement learning: https://www.nervanasys.com/demystifying-deep-reinforcement-learning/
# Train the neural network
def train_neural_network(input_image):
predict_action = convolutional_neural_network(input_image)
argmax = tf.placeholder("float", [None, output])
gt = tf.placeholder("float", [None])
action = tf.reduce_sum(tf.multiply(predict_action, argmax), reduction_indices=1)
cost = tf.reduce_mean(tf.square(action - gt))
optimizer = tf.train.AdamOptimizer(1e-6).minimize(cost)
game = Game()
D = deque()
_, image = game.step(MOVE_STAY)
    # convert the frame to grayscale
image = cv2.cvtColor(cv2.resize(image, (100, 80)), cv2.COLOR_BGR2GRAY)
    # threshold to a binary (black/white) image
ret, image = cv2.threshold(image, 1, 255, cv2.THRESH_BINARY)
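    # Stack four copies of the first frame to build the initial state; the network
    # input expects the last 4 frames as channels.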
input_image_data = np.stack((image, image, image, image), axis=2)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver(max_to_keep=3)
try:
saver.restore(sess, CHECKPOINT_PATH)
except tf.errors.NotFoundError:
print('-1-')
n = 0
epsilon = INITIAL_EPSILON
while True:
action_t = predict_action.eval(feed_dict={input_image: [input_image_data]})[0]
            argmax_t = np.zeros([output], dtype=int)
            # Epsilon-greedy: explore with probability epsilon, otherwise act greedily
            if random.random() <= epsilon:
maxIndex = random.randrange(output)
else:
maxIndex = np.argmax(action_t)
argmax_t[maxIndex] = 1
if epsilon > FINAL_EPSILON:
epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
reward, image = game.step(list(argmax_t))
image = cv2.cvtColor(cv2.resize(image, (100, 80)), cv2.COLOR_BGR2GRAY)
ret, image = cv2.threshold(image, 1, 255, cv2.THRESH_BINARY)
image = np.reshape(image, (80, 100, 1))
input_image_data1 = np.append(image, input_image_data[:, :, 0:3], axis=2)
D.append((input_image_data, argmax_t, reward, input_image_data1))
if len(D) > REPLAY_MEMORY:
D.popleft()
if n > OBSERVE:
minibatch = random.sample(D, BATCH)
input_image_data_batch = [d[0] for d in minibatch]
argmax_batch = [d[1] for d in minibatch]
reward_batch = [d[2] for d in minibatch]
input_image_data1_batch = [d[3] for d in minibatch]
gt_batch = []
out_batch = predict_action.eval(feed_dict={input_image: input_image_data1_batch})
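            # DQN target: y = r + gamma * max_a' Q(s', a'); LEARNING_RATE plays the role
            # of the discount factor gamma here.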
for i in range(0, len(minibatch)):
gt_batch.append(reward_batch[i] + LEARNING_RATE * np.max(out_batch[i]))
optimizer.run(feed_dict={gt: gt_batch, argmax: argmax_batch, input_image: input_image_data_batch})
input_image_data = input_image_data1
n = n + 1
if n % 2000 == 0:
                saver.save(sess, CHECKPOINT_PATH)  # save the model checkpoint
print(n, "epsilon:", epsilon, " ", "action:", maxIndex, " ", "reward:", reward)
train_neural_network(input_image)
|
[
"numpy.argmax",
"random.sample",
"tensorflow.reshape",
"pygame.Rect",
"tensorflow.matmul",
"pygame.display.update",
"tensorflow.multiply",
"tensorflow.nn.conv2d",
"collections.deque",
"pygame.display.set_mode",
"tensorflow.placeholder",
"numpy.append",
"numpy.max",
"numpy.reshape",
"tensorflow.initialize_all_variables",
"pygame.display.set_caption",
"cv2.resize",
"numpy.stack",
"tensorflow.train.Saver",
"pygame.draw.rect",
"tensorflow.Session",
"pygame.init",
"random.random",
"pygame.time.Clock",
"cv2.threshold",
"numpy.zeros",
"pygame.display.get_surface",
"tensorflow.zeros",
"random.randrange",
"tensorflow.square",
"tensorflow.train.AdamOptimizer"
] |
[((3136, 3179), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 80, 100, 4]'], {}), "('float', [None, 80, 100, 4])\n", (3150, 3179), True, 'import tensorflow as tf\n'), ((3198, 3237), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, output]'], {}), "('float', [None, output])\n", (3212, 3237), True, 'import tensorflow as tf\n'), ((4446, 4475), 'tensorflow.reshape', 'tf.reshape', (['conv3', '[-1, 3456]'], {}), '(conv3, [-1, 3456])\n', (4456, 4475), True, 'import tensorflow as tf\n'), ((4871, 4910), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, output]'], {}), "('float', [None, output])\n", (4885, 4910), True, 'import tensorflow as tf\n'), ((4921, 4952), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None]'], {}), "('float', [None])\n", (4935, 4952), True, 'import tensorflow as tf\n'), ((5183, 5190), 'collections.deque', 'deque', ([], {}), '()\n', (5188, 5190), False, 'from collections import deque\n'), ((5351, 5398), 'cv2.threshold', 'cv2.threshold', (['image', '(1)', '(255)', 'cv2.THRESH_BINARY'], {}), '(image, 1, 255, cv2.THRESH_BINARY)\n', (5364, 5398), False, 'import cv2\n'), ((5423, 5469), 'numpy.stack', 'np.stack', (['(image, image, image, image)'], {'axis': '(2)'}), '((image, image, image, image), axis=2)\n', (5431, 5469), True, 'import numpy as np\n'), ((470, 483), 'pygame.init', 'pygame.init', ([], {}), '()\n', (481, 483), False, 'import pygame\n'), ((506, 525), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (523, 525), False, 'import pygame\n'), ((549, 585), 'pygame.display.set_mode', 'pygame.display.set_mode', (['SCREEN_SIZE'], {}), '(SCREEN_SIZE)\n', (572, 585), False, 'import pygame\n'), ((595, 636), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Simple Game"""'], {}), "('Simple Game')\n", (621, 636), False, 'import pygame\n'), ((903, 976), 'pygame.Rect', 'pygame.Rect', (['self.ball_pos_x', 'self.ball_pos_y', 'BALL_SIZE[0]', 'BALL_SIZE[1]'], {}), '(self.ball_pos_x, self.ball_pos_y, BALL_SIZE[0], BALL_SIZE[1])\n', (914, 976), False, 'import pygame\n'), ((1068, 1155), 'pygame.Rect', 'pygame.Rect', (['self.bar_pos_x', '(SCREEN_SIZE[1] - BAR_SIZE[1])', 'BAR_SIZE[0]', 'BAR_SIZE[1]'], {}), '(self.bar_pos_x, SCREEN_SIZE[1] - BAR_SIZE[1], BAR_SIZE[0],\n BAR_SIZE[1])\n', (1079, 1155), False, 'import pygame\n'), ((1758, 1808), 'pygame.draw.rect', 'pygame.draw.rect', (['self.screen', 'WHITE', 'self.bar_pos'], {}), '(self.screen, WHITE, self.bar_pos)\n', (1774, 1808), False, 'import pygame\n'), ((1924, 1975), 'pygame.draw.rect', 'pygame.draw.rect', (['self.screen', 'WHITE', 'self.ball_pos'], {}), '(self.screen, WHITE, self.ball_pos)\n', (1940, 1975), False, 'import pygame\n'), ((2747, 2770), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2768, 2770), False, 'import pygame\n'), ((4579, 4611), 'tensorflow.matmul', 'tf.matmul', (['fc4', "weights['w_out']"], {}), "(fc4, weights['w_out'])\n", (4588, 4611), True, 'import tensorflow as tf\n'), ((4983, 5018), 'tensorflow.multiply', 'tf.multiply', (['predict_action', 'argmax'], {}), '(predict_action, argmax)\n', (4994, 5018), True, 'import tensorflow as tf\n'), ((5068, 5090), 'tensorflow.square', 'tf.square', (['(action - gt)'], {}), '(action - gt)\n', (5077, 5090), True, 'import tensorflow as tf\n'), ((5270, 5298), 'cv2.resize', 'cv2.resize', (['image', '(100, 80)'], {}), '(image, (100, 80))\n', (5280, 5298), False, 'import cv2\n'), ((5482, 5494), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5492, 5494), 
True, 'import tensorflow as tf\n'), ((5572, 5601), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(3)'}), '(max_to_keep=3)\n', (5586, 5601), True, 'import tensorflow as tf\n'), ((2708, 2736), 'pygame.display.get_surface', 'pygame.display.get_surface', ([], {}), '()\n', (2734, 2736), False, 'import pygame\n'), ((3488, 3511), 'tensorflow.zeros', 'tf.zeros', (['[8, 8, 4, 32]'], {}), '([8, 8, 4, 32])\n', (3496, 3511), True, 'import tensorflow as tf\n'), ((3553, 3577), 'tensorflow.zeros', 'tf.zeros', (['[4, 4, 32, 64]'], {}), '([4, 4, 32, 64])\n', (3561, 3577), True, 'import tensorflow as tf\n'), ((3619, 3643), 'tensorflow.zeros', 'tf.zeros', (['[3, 3, 64, 64]'], {}), '([3, 3, 64, 64])\n', (3627, 3643), True, 'import tensorflow as tf\n'), ((3683, 3704), 'tensorflow.zeros', 'tf.zeros', (['[3456, 784]'], {}), '([3456, 784])\n', (3691, 3704), True, 'import tensorflow as tf\n'), ((3744, 3767), 'tensorflow.zeros', 'tf.zeros', (['[784, output]'], {}), '([784, output])\n', (3752, 3767), True, 'import tensorflow as tf\n'), ((3810, 3824), 'tensorflow.zeros', 'tf.zeros', (['[32]'], {}), '([32])\n', (3818, 3824), True, 'import tensorflow as tf\n'), ((3865, 3879), 'tensorflow.zeros', 'tf.zeros', (['[64]'], {}), '([64])\n', (3873, 3879), True, 'import tensorflow as tf\n'), ((3920, 3934), 'tensorflow.zeros', 'tf.zeros', (['[64]'], {}), '([64])\n', (3928, 3934), True, 'import tensorflow as tf\n'), ((3973, 3988), 'tensorflow.zeros', 'tf.zeros', (['[784]'], {}), '([784])\n', (3981, 3988), True, 'import tensorflow as tf\n'), ((4027, 4045), 'tensorflow.zeros', 'tf.zeros', (['[output]'], {}), '([output])\n', (4035, 4045), True, 'import tensorflow as tf\n'), ((4074, 4163), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input_image', "weights['w_conv1']"], {'strides': '[1, 4, 4, 1]', 'padding': '"""VALID"""'}), "(input_image, weights['w_conv1'], strides=[1, 4, 4, 1], padding\n ='VALID')\n", (4086, 4163), True, 'import tensorflow as tf\n'), ((4204, 4282), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['conv1', "weights['w_conv2']"], {'strides': '[1, 2, 2, 1]', 'padding': '"""VALID"""'}), "(conv1, weights['w_conv2'], strides=[1, 2, 2, 1], padding='VALID')\n", (4216, 4282), True, 'import tensorflow as tf\n'), ((4328, 4406), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['conv2', "weights['w_conv3']"], {'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""'}), "(conv2, weights['w_conv3'], strides=[1, 1, 1, 1], padding='VALID')\n", (4340, 4406), True, 'import tensorflow as tf\n'), ((4498, 4537), 'tensorflow.matmul', 'tf.matmul', (['conv3_flat', "weights['w_fc4']"], {}), "(conv3_flat, weights['w_fc4'])\n", (4507, 4537), True, 'import tensorflow as tf\n'), ((5109, 5138), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(1e-06)'], {}), '(1e-06)\n', (5131, 5138), True, 'import tensorflow as tf\n'), ((5522, 5551), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (5549, 5551), True, 'import tensorflow as tf\n'), ((5926, 5958), 'numpy.zeros', 'np.zeros', (['[output]'], {'dtype': 'np.int'}), '([output], dtype=np.int)\n', (5934, 5958), True, 'import numpy as np\n'), ((6451, 6498), 'cv2.threshold', 'cv2.threshold', (['image', '(1)', '(255)', 'cv2.THRESH_BINARY'], {}), '(image, 1, 255, cv2.THRESH_BINARY)\n', (6464, 6498), False, 'import cv2\n'), ((6520, 6551), 'numpy.reshape', 'np.reshape', (['image', '(80, 100, 1)'], {}), '(image, (80, 100, 1))\n', (6530, 6551), True, 'import numpy as np\n'), ((6585, 6638), 'numpy.append', 'np.append', (['image', 'input_image_data[:, :, 
0:3]'], {'axis': '(2)'}), '(image, input_image_data[:, :, 0:3], axis=2)\n', (6594, 6638), True, 'import numpy as np\n'), ((5976, 5991), 'random.random', 'random.random', ([], {}), '()\n', (5989, 5991), False, 'import random\n'), ((6041, 6065), 'random.randrange', 'random.randrange', (['output'], {}), '(output)\n', (6057, 6065), False, 'import random\n'), ((6113, 6132), 'numpy.argmax', 'np.argmax', (['action_t'], {}), '(action_t)\n', (6122, 6132), True, 'import numpy as np\n'), ((6375, 6403), 'cv2.resize', 'cv2.resize', (['image', '(100, 80)'], {}), '(image, (100, 80))\n', (6385, 6403), False, 'import cv2\n'), ((6851, 6874), 'random.sample', 'random.sample', (['D', 'BATCH'], {}), '(D, BATCH)\n', (6864, 6874), False, 'import random\n'), ((7387, 7407), 'numpy.max', 'np.max', (['out_batch[i]'], {}), '(out_batch[i])\n', (7393, 7407), True, 'import numpy as np\n')]
|
import os
import random
import string
from shapely.geometry import Point, Polygon
from datetime import datetime
from dateutil import parser
def parse_datetime(input_var):
if isinstance(input_var, str):
return parser.parse(input_var).replace(tzinfo=None)
elif input_var is None:
return input_var
return input_var.replace(tzinfo=None)
def datetime_to_str(datetime_obj):
if datetime_obj:
return datetime_obj.strftime("%Y-%m-%d %H:%M:%S")
return None
charset = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!0123456789'
def pre_init_check(required_fields, **kwargs):
missing_fields = []
for field in required_fields:
if not kwargs.get(field):
missing_fields.append(field)
if len(missing_fields):
raise MissingModelFields(missing_fields)
class MissingModelFields(Exception):
def __init__(self, field):
super().__init__("The model is missing {} field(s)".format(field))
"""
This function takes a long lat value and checks
if it lies within the polygon using Shapely. Need
a way to not hardcode polygon points
"""
def poly_pos(lat,long):
position = Point(lat, long)
print(position)
polygon = Polygon([(0, 0), (1, 1), (1, 0)])
if polygon.contains(position):
print("This point lies within the polygon")
else:
print("This point does not lie within the polygon")
|
[
"shapely.geometry.Point",
"dateutil.parser.parse",
"shapely.geometry.Polygon"
] |
[((1180, 1196), 'shapely.geometry.Point', 'Point', (['lat', 'long'], {}), '(lat, long)\n', (1185, 1196), False, 'from shapely.geometry import Point, Polygon\n'), ((1231, 1264), 'shapely.geometry.Polygon', 'Polygon', (['[(0, 0), (1, 1), (1, 0)]'], {}), '([(0, 0), (1, 1), (1, 0)])\n', (1238, 1264), False, 'from shapely.geometry import Point, Polygon\n'), ((222, 245), 'dateutil.parser.parse', 'parser.parse', (['input_var'], {}), '(input_var)\n', (234, 245), False, 'from dateutil import parser\n')]
|
"""
Schema for disclosure objects.
"""
import copy
from .common import sources, extras, identifiers, contact_details,\
fuzzy_datetime_blank, documents
from opencivicdata import common
fuzzy_datetime = copy.deepcopy(fuzzy_datetime_blank)
fuzzy_datetime["blank"] = False
reporting_period_schema = {
"properties": {
'start_date': fuzzy_datetime_blank,
'end_date': fuzzy_datetime_blank,
"timezone": {
"type": "string"
},
'period_type': {
'type': 'string'
},
'description': {
'type': 'string'
},
'authorities': {
'type': 'array',
"items": {
"properties": {
"entity_type": {
"type": "string"
},
"jurisdiction": {
"type": "string"
},
"id": {
"type": "string"
},
"name": {
"type": "string"
}
},
"type": "object",
}
}
},
"type": "object"
}
disclosure_schema = {
"properties": {
"classification": {
"type": ["string", "null"],
"enum": common.DISCLOSURE_CLASSIFICATIONS,
},
"identifiers": identifiers,
"contact_details": contact_details,
"related_entities": {
"items": {
"properties": {
"entity_type": {
"type": "string"
},
"name": {
"type": "string"
},
"note": {
"type": ["string", "null"],
},
},
"type": "object"
},
"type": "array"
},
"submitted_date": {
"type": "datetime"
},
"effective_date": {
"type": "datetime"
},
"timezone": {
"type": "string"
},
"source_identified": {
"type": "boolean",
},
"documents": documents,
"sources": sources,
"extras": extras
},
"type": "object"
}
|
[
"copy.deepcopy"
] |
[((212, 247), 'copy.deepcopy', 'copy.deepcopy', (['fuzzy_datetime_blank'], {}), '(fuzzy_datetime_blank)\n', (225, 247), False, 'import copy\n')]
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
from torch.optim import Optimizer
class QHM(Optimizer):
r"""
Stochastic gradient method with Quasi-Hyperbolic Momentum (QHM):
h(k) = (1 - \beta) * g(k) + \beta * h(k-1)
d(k) = (1 - \nu) * g(k) + \nu * h(k)
x(k+1) = x(k) - \alpha * d(k)
"Quasi-hyperbolic momentum and Adam for deep learning"
by <NAME> and <NAME>, ICLR 2019
optimizer = QHM(params, lr=-1, momentum=0, qhm_nu=1, weight_decay=0)
Args:
params (iterable): iterable params to optimize or dict of param groups
lr (float): learning rate, \alpha in QHM update (default:-1 need input)
momentum (float, optional): \beta in QHM update, range[0,1) (default:0)
qhm_nu (float, optional): \nu in QHM update, range[0,1] (default: 1)
\nu = 0: SGD without momentum (\beta is ignored)
\nu = 1: SGD with momentum \beta and dampened gradient (1-\beta)
\nu = \beta: SGD with "Nesterov momentum" \beta
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
Example:
>>> optimizer = torch.optim.QHM(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
"""
def __init__(self, params, lr=-1, momentum=0, qhm_nu=1, weight_decay=0):
# nu can take values outside of the interval [0,1], but no guarantee of convergence?
if lr <= 0:
raise ValueError("Invalid value for learning rate (>0): {}".format(lr))
        if momentum < 0 or momentum >= 1:
raise ValueError("Invalid value for momentum [0,1): {}".format(momentum))
if weight_decay < 0:
raise ValueError("Invalid value for weight_decay (>=0): {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, qhm_nu=qhm_nu, weight_decay=weight_decay)
super(QHM, self).__init__(params, defaults)
# extra_buffer == True only in SSLS with momentum > 0 and nu != 1
self.state['allocate_step_buffer'] = False
def step(self, closure=None):
"""
Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates model and returns loss.
"""
loss = None
if closure is not None:
loss = closure()
self.add_weight_decay()
self.qhm_direction()
self.qhm_update()
return loss
def add_weight_decay(self):
# weight_decay is the same as adding L2 regularization
for group in self.param_groups:
weight_decay = group['weight_decay']
for p in group['params']:
if p.grad is None:
continue
if weight_decay > 0:
p.grad.data.add_(p.data, alpha=weight_decay)
def qhm_direction(self):
for group in self.param_groups:
momentum = group['momentum']
qhm_nu = group['qhm_nu']
for p in group['params']:
if p.grad is None:
continue
x = p.data # Optimization parameters
g = p.grad.data # Stochastic gradient
                # Compute the (negative) step direction d and the necessary momentum
state = self.state[p]
if abs(momentum) < 1e-12 or abs(qhm_nu) < 1e-12: # simply SGD if beta=0 or nu=0
d = state['step_buffer'] = g
else:
if 'momentum_buffer' not in state:
h = state['momentum_buffer'] = torch.zeros_like(x)
else:
h = state['momentum_buffer']
# Update momentum buffer: h(k) = (1 - \beta) * g(k) + \beta * h(k-1)
h.mul_(momentum).add_(g, alpha=1 - momentum)
if abs(qhm_nu - 1) < 1e-12: # if nu=1, then same as SGD with momentum
d = state['step_buffer'] = h
else:
if self.state['allocate_step_buffer']: # copy from gradient
if 'step_buffer' not in state:
state['step_buffer'] = torch.zeros_like(g)
d = state['step_buffer'].copy_(g)
else: # use gradient buffer
d = state['step_buffer'] = g
# Compute QHM momentum: d(k) = (1 - \nu) * g(k) + \nu * h(k)
d.mul_(1 - qhm_nu).add_(h, alpha=qhm_nu)
def qhm_update(self):
"""
Perform QHM update, need to call compute_qhm_direction() before calling this.
"""
for group in self.param_groups:
for p in group['params']:
if p.grad is not None:
p.data.add_(self.state[p]['step_buffer'], alpha=-group['lr'])
|
[
"torch.zeros_like"
] |
[((3707, 3726), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (3723, 3726), False, 'import torch\n'), ((4330, 4349), 'torch.zeros_like', 'torch.zeros_like', (['g'], {}), '(g)\n', (4346, 4349), False, 'import torch\n')]
|
from urllib import response
from django.shortcuts import render
from django.http import HttpResponseRedirect
from datetime import datetime
from .models import Ventas
from .forms import FormVentas
def eliminar(request, id):
    obj = Ventas.objects.get(id_factura=id)
    if request.method == "GET":
        obj.delete()
    return HttpResponseRedirect("/ventas/lista")
def lista(request):
    context = {}
    context["dataset"] = Ventas.objects.all()
    return render(request, "consulta.html", context)
def mensaje(request):
    return render(request, "mensaje.html")
# Create your views here.
def crear_venta(request):
if request.method == 'POST':
form = FormVentas(request.POST)
if form.is_valid():
venta = Ventas(
id_factura = form.cleaned_data['id_factura'],
rama= form.cleaned_data['rama'],
ciudad= form.cleaned_data['ciudad'],
tipo_cliente= form.cleaned_data['tipo_cliente'],
genero= form.cleaned_data['genero'],
linea_de_producto= form.cleaned_data['linea_de_producto'],
precio_por_unidad= form.cleaned_data['precio_por_unidad'],
cantidad= form.cleaned_data['cantidad'],
impuesto= form.cleaned_data['impuesto'],
total= form.cleaned_data['total'],
fecha_hora = datetime.now()
)
venta.save()
return HttpResponseRedirect('/ventas/mensaje')
else:
form = FormVentas()
return render(request,"formulario.html",{'form': form})
|
[
"django.shortcuts.render",
"django.http.HttpResponseRedirect",
"datetime.datetime.now"
] |
[((358, 395), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/ventas/lista"""'], {}), "('/ventas/lista')\n", (378, 395), False, 'from django.http import HttpResponseRedirect\n'), ((499, 540), 'django.shortcuts.render', 'render', (['request', '"""consulta.html"""', 'context'], {}), "(request, 'consulta.html', context)\n", (505, 540), False, 'from django.shortcuts import render\n'), ((575, 606), 'django.shortcuts.render', 'render', (['request', '"""mensaje.html"""'], {}), "(request, 'mensaje.html')\n", (581, 606), False, 'from django.shortcuts import render\n'), ((1625, 1675), 'django.shortcuts.render', 'render', (['request', '"""formulario.html"""', "{'form': form}"], {}), "(request, 'formulario.html', {'form': form})\n", (1631, 1675), False, 'from django.shortcuts import render\n'), ((1531, 1570), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/ventas/mensaje"""'], {}), "('/ventas/mensaje')\n", (1551, 1570), False, 'from django.http import HttpResponseRedirect\n'), ((1458, 1472), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1470, 1472), False, 'from datetime import datetime\n')]
|
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test OGR XPlane driver functionality.
# Author: <NAME> <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2008, <NAME> <even dot rouault at mines dash paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import string
sys.path.append( '../pymod' )
import ogrtest
import gdaltest
from osgeo import ogr
###############################################################################
# Test apt.dat reading
def ogr_xplane_apt_dat():
xplane_apt_ds = ogr.Open( 'data/apt.dat' )
if xplane_apt_ds is None:
return 'fail'
layers = [ ( 'APT' , 8, [ ('apt_icao', 'E46') ] ),
( 'RunwayPolygon' , 19, [ ('apt_icao', 'E46') ] ),
( 'RunwayThreshold' , 44, [ ('apt_icao', 'E46') ] ),
( 'WaterRunwayPolygon' , 1, [ ('apt_icao', 'I38') ] ),
( 'WaterRunwayThreshold' , 2, [ ('apt_icao', 'I38') ] ),
( 'Helipad' , 2, [ ('apt_icao', 'CYXX') ] ),
( 'HelipadPolygon' , 2, [ ('apt_icao', 'CYXX') ] ),
( 'TaxiwayRectangle' , 437, [ ('apt_icao', 'LFPG') ] ),
( 'Pavement' , 11, [ ('apt_icao', 'CYXX') ] ),
( 'APTBoundary' , 1, [ ('apt_icao', 'VTX2') ] ),
( 'APTLinearFeature' , 45, [ ('apt_icao', 'CYXX') ] ),
( 'ATCFreq' , 42, [ ('apt_icao', 'CYXX') ] ),
( 'StartupLocation' , 110, [ ('apt_icao', 'CYXX') ] ),
( 'APTLightBeacon' , 3, [ ('apt_icao', 'CYXX') ] ),
( 'APTWindsock' , 25, [ ('apt_icao', 'E46') ] ),
( 'TaxiwaySign' , 17, [ ('apt_icao', 'CYXX') ] ),
( 'VASI_PAPI_WIGWAG' , 30, [ ('apt_icao', 'CYXX') ] ),
( 'Stopway' , 6, [ ('apt_icao', 'LFPM') ] ),
]
for layer in layers:
lyr = xplane_apt_ds.GetLayerByName( layer[0] )
if lyr.GetFeatureCount() != layer[1] :
gdaltest.post_reason( 'wrong number of features for layer %s : %d. %d were expected ' % (layer[0], lyr.GetFeatureCount(), layer[1]) )
return 'fail'
feat_read = lyr.GetNextFeature()
for item in layer[2]:
if feat_read.GetField(item[0]) != item[1]:
print(layer[0])
print(item[0])
print(feat_read.GetField(item[0]))
return 'fail'
return 'success'
###############################################################################
# Test apt.dat v810 reading
def ogr_xplane_apt_v810_dat():
xplane_apt_ds = ogr.Open( 'data/apt810/apt.dat' )
if xplane_apt_ds is None:
return 'fail'
layers = [ ( 'APT' , 6, [ ('apt_icao', 'UHP1') ] ),
( 'RunwayPolygon' , 6, [ ('apt_icao', 'UHP1') ] ),
( 'RunwayThreshold' , 13, [ ('apt_icao', 'UHP1') ] ),
( 'WaterRunwayPolygon' , 2, [ ('apt_icao', '6MA8') ] ),
( 'WaterRunwayThreshold' , 4, [ ('apt_icao', '6MA8') ] ),
( 'Helipad' , 1, [ ('apt_icao', '9FD6') ] ),
( 'HelipadPolygon' , 1, [ ('apt_icao', '9FD6') ] ),
( 'TaxiwayRectangle' , 54, [ ('apt_icao', 'UHP1') ] ),
( 'Pavement' , 0, [ ] ),
( 'APTBoundary' , 0, [ ] ),
( 'APTLinearFeature' , 0, [ ] ),
( 'ATCFreq' , 10, [ ('apt_icao', 'EHVB') ] ),
( 'StartupLocation' , 0, [ ] ),
( 'APTLightBeacon' , 2, [ ('apt_icao', '7I6') ] ),
( 'APTWindsock' , 9, [ ('apt_icao', 'UHP1') ] ),
( 'TaxiwaySign' , 0, [ ] ),
( 'VASI_PAPI_WIGWAG' , 12, [ ('apt_icao', 'UHP1') ] ),
( 'Stopway' , 4, [ ('apt_icao', 'EKYT' ) ] ),
]
for layer in layers:
lyr = xplane_apt_ds.GetLayerByName( layer[0] )
if lyr.GetFeatureCount() != layer[1] :
gdaltest.post_reason( 'wrong number of features for layer %s : %d. %d were expected ' % (layer[0], lyr.GetFeatureCount(), layer[1]) )
return 'fail'
feat_read = lyr.GetNextFeature()
for item in layer[2]:
if feat_read.GetField(item[0]) != item[1]:
print(layer[0])
print(item[0])
print(feat_read.GetField(item[0]))
return 'fail'
return 'success'
###############################################################################
# Test nav.dat reading
def ogr_xplane_nav_dat():
xplane_nav_ds = ogr.Open( 'data/nav.dat' )
if xplane_nav_ds is None:
return 'fail'
layers = [ ( 'ILS' , 6, [ ('navaid_id', 'IMQS') ] ),
( 'VOR' , 3, [ ('navaid_id', 'AAL') ] ),
( 'NDB' , 4, [ ('navaid_id', 'APH') ] ),
( 'GS' , 1, [ ('navaid_id', 'IMQS') ] ),
( 'Marker' , 3, [ ('apt_icao', '40N') ] ),
( 'DME' , 6, [ ('navaid_id', 'AAL') ] ),
( 'DMEILS' , 1, [ ('navaid_id', 'IWG') ] )
]
for layer in layers:
lyr = xplane_nav_ds.GetLayerByName( layer[0] )
if lyr.GetFeatureCount() != layer[1] :
gdaltest.post_reason( 'wrong number of features for layer %s : %d. %d were expected ' % (layer[0], lyr.GetFeatureCount(), layer[1]) )
return 'fail'
feat_read = lyr.GetNextFeature()
for item in layer[2]:
if feat_read.GetField(item[0]) != item[1]:
print(layer[0])
print(item[0])
print(feat_read.GetField(item[0]))
return 'fail'
xplane_nav_ds = None
return 'success'
###############################################################################
# Test awy.dat reading
def ogr_xplane_awy_dat():
xplane_awy_ds = ogr.Open( 'data/awy.dat' )
if xplane_awy_ds is None:
return 'fail'
layers = [ ( 'AirwaySegment' , 11, [ ('segment_name', 'R464') ] ),
( 'AirwayIntersection' , 14, [ ('name', '00MKK') ] )
]
for layer in layers:
lyr = xplane_awy_ds.GetLayerByName( layer[0] )
if lyr.GetFeatureCount() != layer[1] :
gdaltest.post_reason( 'wrong number of features for layer %s : %d. %d were expected ' % (layer[0], lyr.GetFeatureCount(), layer[1]) )
return 'fail'
feat_read = lyr.GetNextFeature()
for item in layer[2]:
if feat_read.GetField(item[0]) != item[1]:
print(layer[0])
print(item[0])
print(feat_read.GetField(item[0]))
return 'fail'
return 'success'
###############################################################################
# Test fix.dat reading
def ogr_xplane_fix_dat():
xplane_fix_ds = ogr.Open( 'data/fix.dat' )
if xplane_fix_ds is None:
return 'fail'
layers = [ ( 'FIX' , 1, [ ('fix_name', '00MKK') ] )
]
for layer in layers:
lyr = xplane_fix_ds.GetLayerByName( layer[0] )
if lyr.GetFeatureCount() != layer[1] :
gdaltest.post_reason( 'wrong number of features for layer %s : %d. %d were expected ' % (layer[0], lyr.GetFeatureCount(), layer[1]) )
return 'fail'
feat_read = lyr.GetNextFeature()
for item in layer[2]:
if feat_read.GetField(item[0]) != item[1]:
print(layer[0])
print(item[0])
print(feat_read.GetField(item[0]))
return 'fail'
return 'success'
###############################################################################
#
gdaltest_list = [
ogr_xplane_apt_dat,
ogr_xplane_apt_v810_dat,
ogr_xplane_nav_dat,
ogr_xplane_awy_dat,
ogr_xplane_fix_dat ]
if __name__ == '__main__':
gdaltest.setup_run( 'ogr_xplane' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
|
[
"sys.path.append",
"osgeo.ogr.Open",
"gdaltest.setup_run",
"gdaltest.summarize",
"gdaltest.run_tests"
] |
[((1594, 1621), 'sys.path.append', 'sys.path.append', (['"""../pymod"""'], {}), "('../pymod')\n", (1609, 1621), False, 'import sys\n'), ((1830, 1854), 'osgeo.ogr.Open', 'ogr.Open', (['"""data/apt.dat"""'], {}), "('data/apt.dat')\n", (1838, 1854), False, 'from osgeo import ogr\n'), ((4028, 4059), 'osgeo.ogr.Open', 'ogr.Open', (['"""data/apt810/apt.dat"""'], {}), "('data/apt810/apt.dat')\n", (4036, 4059), False, 'from osgeo import ogr\n'), ((6123, 6147), 'osgeo.ogr.Open', 'ogr.Open', (['"""data/nav.dat"""'], {}), "('data/nav.dat')\n", (6131, 6147), False, 'from osgeo import ogr\n'), ((7501, 7525), 'osgeo.ogr.Open', 'ogr.Open', (['"""data/awy.dat"""'], {}), "('data/awy.dat')\n", (7509, 7525), False, 'from osgeo import ogr\n'), ((8489, 8513), 'osgeo.ogr.Open', 'ogr.Open', (['"""data/fix.dat"""'], {}), "('data/fix.dat')\n", (8497, 8513), False, 'from osgeo import ogr\n'), ((9513, 9545), 'gdaltest.setup_run', 'gdaltest.setup_run', (['"""ogr_xplane"""'], {}), "('ogr_xplane')\n", (9531, 9545), False, 'import gdaltest\n'), ((9553, 9586), 'gdaltest.run_tests', 'gdaltest.run_tests', (['gdaltest_list'], {}), '(gdaltest_list)\n', (9571, 9586), False, 'import gdaltest\n'), ((9594, 9614), 'gdaltest.summarize', 'gdaltest.summarize', ([], {}), '()\n', (9612, 9614), False, 'import gdaltest\n')]
|
from collections.abc import Sequence
from dataclasses import fields
import libcst as cst
from buglab.representations.codereprs import PythonCodeRelations
from buglab.utils.cstutils import is_whitespace_node
__all__ = ["AstRelations"]
class AstRelations(cst.CSTVisitor):
def __init__(self, code_relations: PythonCodeRelations):
super().__init__()
self.__code_relations = code_relations
def __named_children(self, node: cst.CSTNode):
names = {}
for f in fields(node):
child_attribute = getattr(node, f.name)
if isinstance(child_attribute, Sequence):
for c in child_attribute:
names[c] = f.name
else:
names[child_attribute] = f.name
for child in node.children:
yield child, names[child]
def on_visit(self, node: cst.CSTNode) -> bool:
if is_whitespace_node(node):
return False
prev_child = None
for child, child_name in self.__named_children(node):
if is_whitespace_node(child):
continue
self.__code_relations.add_relation("Child", node, child, child_name)
if prev_child is not None:
self.__code_relations.add_relation("Sibling", prev_child, child)
prev_child = child
return True
@staticmethod
def add_ast_relations(code_rels: PythonCodeRelations, *args) -> None:
code_rels.ast_with_metadata_wrapper.visit(AstRelations(code_rels))
|
[
"dataclasses.fields",
"buglab.utils.cstutils.is_whitespace_node"
] |
[((498, 510), 'dataclasses.fields', 'fields', (['node'], {}), '(node)\n', (504, 510), False, 'from dataclasses import fields\n'), ((902, 926), 'buglab.utils.cstutils.is_whitespace_node', 'is_whitespace_node', (['node'], {}), '(node)\n', (920, 926), False, 'from buglab.utils.cstutils import is_whitespace_node\n'), ((1057, 1082), 'buglab.utils.cstutils.is_whitespace_node', 'is_whitespace_node', (['child'], {}), '(child)\n', (1075, 1082), False, 'from buglab.utils.cstutils import is_whitespace_node\n')]
|
# noqa: D100
import json
import logging
import os
import subprocess
from typing import List, Optional, Union
import hail as hl
from gnomad.resources.resource_utils import VersionedTableResource
logging.basicConfig(format="%(levelname)s (%(name)s %(lineno)s): %(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Note that this is the current set as of v81, with some terms included for backwards compatibility (VEP <= 75)
CSQ_CODING_HIGH_IMPACT = [
"transcript_ablation",
"splice_acceptor_variant",
"splice_donor_variant",
"stop_gained",
"frameshift_variant",
"stop_lost",
]
CSQ_CODING_MEDIUM_IMPACT = [
"start_lost", # new in v81
"initiator_codon_variant", # deprecated
"transcript_amplification",
"inframe_insertion",
"inframe_deletion",
"missense_variant",
"protein_altering_variant", # new in v79
"splice_region_variant",
]
CSQ_CODING_LOW_IMPACT = [
"incomplete_terminal_codon_variant",
"start_retained_variant", # new in v92
"stop_retained_variant",
"synonymous_variant",
"coding_sequence_variant",
]
CSQ_NON_CODING = [
"mature_miRNA_variant",
"5_prime_UTR_variant",
"3_prime_UTR_variant",
"non_coding_transcript_exon_variant",
"non_coding_exon_variant", # deprecated
"intron_variant",
"NMD_transcript_variant",
"non_coding_transcript_variant",
"nc_transcript_variant", # deprecated
"upstream_gene_variant",
"downstream_gene_variant",
"TFBS_ablation",
"TFBS_amplification",
"TF_binding_site_variant",
"regulatory_region_ablation",
"regulatory_region_amplification",
"feature_elongation",
"regulatory_region_variant",
"feature_truncation",
"intergenic_variant",
]
CSQ_ORDER = (
CSQ_CODING_HIGH_IMPACT
+ CSQ_CODING_MEDIUM_IMPACT
+ CSQ_CODING_LOW_IMPACT
+ CSQ_NON_CODING
)
POSSIBLE_REFS = ("GRCh37", "GRCh38")
"""
Constant containing supported references
"""
VEP_CONFIG_PATH = "file:///vep_data/vep-gcloud.json"
"""
Constant that contains the local path to the VEP config file
"""
VEP_CSQ_FIELDS = "Allele|Consequence|IMPACT|SYMBOL|Gene|Feature_type|Feature|BIOTYPE|EXON|INTRON|HGVSc|HGVSp|cDNA_position|CDS_position|Protein_position|Amino_acids|Codons|ALLELE_NUM|DISTANCE|STRAND|VARIANT_CLASS|MINIMISED|SYMBOL_SOURCE|HGNC_ID|CANONICAL|TSL|APPRIS|CCDS|ENSP|SWISSPROT|TREMBL|UNIPARC|GENE_PHENO|SIFT|PolyPhen|DOMAINS|HGVS_OFFSET|MOTIF_NAME|MOTIF_POS|HIGH_INF_POS|MOTIF_SCORE_CHANGE|LoF|LoF_filter|LoF_flags|LoF_info"
"""
Constant that defines the order of VEP annotations used in VCF export.
"""
VEP_CSQ_HEADER = f"Consequence annotations from Ensembl VEP. Format: {VEP_CSQ_FIELDS}"
"""
Constant that contains description for VEP used in VCF export.
"""
LOFTEE_LABELS = ["HC", "LC", "OS"]
"""
Constant that contains annotations added by LOFTEE.
"""
LOF_CSQ_SET = {
"splice_acceptor_variant",
"splice_donor_variant",
"stop_gained",
"frameshift_variant",
}
"""
Set containing loss-of-function consequence strings.
"""
def get_vep_help(vep_config_path: Optional[str] = None):
"""
Return the output of vep --help which includes the VEP version.
.. warning::
If no `vep_config_path` is supplied, this function will only work for Dataproc clusters
created with `hailctl dataproc start --vep`. It assumes that the command is `/path/to/vep`.
:param vep_config_path: Optional path to use as the VEP config file. If None, `VEP_CONFIG_URI` environment variable is used
:return: VEP help string
"""
if vep_config_path is None:
vep_config_path = os.environ["VEP_CONFIG_URI"]
with hl.hadoop_open(vep_config_path) as vep_config_file:
vep_config = json.load(vep_config_file)
vep_command = vep_config["command"]
vep_help = subprocess.check_output([vep_command[0]]).decode("utf-8")
return vep_help
def get_vep_context(ref: Optional[str] = None) -> VersionedTableResource:
"""
Get VEP context resource for the genome build `ref`.
:param ref: Genome build. If None, `hl.default_reference` is used
:return: VEPed context resource
"""
import gnomad.resources.grch37.reference_data as grch37
import gnomad.resources.grch38.reference_data as grch38
if ref is None:
ref = hl.default_reference().name
if ref not in POSSIBLE_REFS:
raise ValueError(
f'get_vep_context passed {ref}. Expected one of {", ".join(POSSIBLE_REFS)}'
)
vep_context = grch37.vep_context if ref == "GRCh37" else grch38.vep_context
return vep_context
def vep_or_lookup_vep(
ht, reference_vep_ht=None, reference=None, vep_config_path=None, vep_version=None
):
"""
VEP a table, or lookup variants in a reference database.
.. warning::
If `reference_vep_ht` is supplied, no check is performed to confirm `reference_vep_ht` was
generated with the same version of VEP / VEP configuration as the VEP referenced in `vep_config_path`.
:param ht: Input Table
:param reference_vep_ht: A reference database with VEP annotations (must be in top-level `vep`)
:param reference: If reference_vep_ht is not specified, find a suitable one in reference (if None, grabs from hl.default_reference)
:param vep_config_path: vep_config to pass to hl.vep (if None, a suitable one for `reference` is chosen)
:param vep_version: Version of VEPed context Table to use (if None, the default `vep_context` resource will be used)
:return: VEPed Table
"""
if reference is None:
reference = hl.default_reference().name
if vep_config_path is None:
vep_config_path = VEP_CONFIG_PATH
vep_help = get_vep_help(vep_config_path)
with hl.hadoop_open(vep_config_path) as vep_config_file:
vep_config = vep_config_file.read()
if reference_vep_ht is None:
if reference not in POSSIBLE_REFS:
raise ValueError(
f'vep_or_lookup_vep got {reference}. Expected one of {", ".join(POSSIBLE_REFS)}'
)
vep_context = get_vep_context(reference)
if vep_version is None:
vep_version = vep_context.default_version
if vep_version not in vep_context.versions:
logger.warning(
"No VEPed context Table available for genome build %s and VEP version %s, "
"all variants will be VEPed using the following VEP:\n%s",
reference,
vep_version,
vep_help,
)
return hl.vep(ht, vep_config_path)
logger.info(
"Using VEPed context Table from genome build %s and VEP version %s",
reference,
vep_version,
)
reference_vep_ht = vep_context.versions[vep_version].ht()
vep_context_help = hl.eval(reference_vep_ht.vep_help)
vep_context_config = hl.eval(reference_vep_ht.vep_config)
assert vep_help == vep_context_help, (
f"The VEP context HT version does not match the version referenced in the VEP config file."
f"\nVEP context:\n{vep_context_help}\n\n VEP config:\n{vep_help}"
)
assert vep_config == vep_context_config, (
f"The VEP context HT configuration does not match the configuration in {vep_config_path}."
f"\nVEP context:\n{vep_context_config}\n\n Current config:\n{vep_config}"
)
ht = ht.annotate(vep=reference_vep_ht[ht.key].vep)
vep_ht = ht.filter(hl.is_defined(ht.vep))
revep_ht = ht.filter(hl.is_missing(ht.vep))
revep_ht = hl.vep(revep_ht, vep_config_path)
return vep_ht.union(revep_ht)
def add_most_severe_consequence_to_consequence(
tc: hl.expr.StructExpression,
) -> hl.expr.StructExpression:
"""
Add most_severe_consequence annotation to transcript consequences.
This is for a given transcript, as there are often multiple annotations for a single transcript:
e.g. splice_region_variant&intron_variant -> splice_region_variant
"""
csqs = hl.literal(CSQ_ORDER)
return tc.annotate(
most_severe_consequence=csqs.find(lambda c: tc.consequence_terms.contains(c))
)
def process_consequences(
mt: Union[hl.MatrixTable, hl.Table],
vep_root: str = "vep",
penalize_flags: bool = True,
) -> Union[hl.MatrixTable, hl.Table]:
"""
Add most_severe_consequence into [vep_root].transcript_consequences, and worst_csq_by_gene, any_lof into [vep_root].
`most_severe_consequence` is the worst consequence for a transcript.
:param mt: Input MT
:param vep_root: Root for vep annotation (probably vep)
:param penalize_flags: Whether to penalize LOFTEE flagged variants, or treat them as equal to HC
:return: MT with better formatted consequences
"""
csqs = hl.literal(CSQ_ORDER)
csq_dict = hl.literal(dict(zip(CSQ_ORDER, range(len(CSQ_ORDER)))))
def find_worst_transcript_consequence(
tcl: hl.expr.ArrayExpression,
) -> hl.expr.StructExpression:
"""Get worst transcript_consequence from an array of em."""
flag_score = 500
no_flag_score = flag_score * (1 + penalize_flags)
def csq_score(tc):
return csq_dict[csqs.find(lambda x: x == tc.most_severe_consequence)]
tcl = tcl.map(
lambda tc: tc.annotate(
csq_score=hl.case(missing_false=True)
.when(
(tc.lof == "HC") & (tc.lof_flags == ""),
csq_score(tc) - no_flag_score,
)
.when(
(tc.lof == "HC") & (tc.lof_flags != ""), csq_score(tc) - flag_score
)
.when(tc.lof == "OS", csq_score(tc) - 20)
.when(tc.lof == "LC", csq_score(tc) - 10)
.when(
tc.polyphen_prediction == "probably_damaging", csq_score(tc) - 0.5
)
.when(
tc.polyphen_prediction == "possibly_damaging", csq_score(tc) - 0.25
)
.when(tc.polyphen_prediction == "benign", csq_score(tc) - 0.1)
.default(csq_score(tc))
)
)
return hl.or_missing(hl.len(tcl) > 0, hl.sorted(tcl, lambda x: x.csq_score)[0])
transcript_csqs = mt[vep_root].transcript_consequences.map(
add_most_severe_consequence_to_consequence
)
gene_dict = transcript_csqs.group_by(lambda tc: tc.gene_symbol)
worst_csq_gene = gene_dict.map_values(find_worst_transcript_consequence).values()
sorted_scores = hl.sorted(worst_csq_gene, key=lambda tc: tc.csq_score)
canonical = transcript_csqs.filter(lambda csq: csq.canonical == 1)
gene_canonical_dict = canonical.group_by(lambda tc: tc.gene_symbol)
worst_csq_gene_canonical = gene_canonical_dict.map_values(
find_worst_transcript_consequence
).values()
sorted_canonical_scores = hl.sorted(
worst_csq_gene_canonical, key=lambda tc: tc.csq_score
)
vep_data = mt[vep_root].annotate(
transcript_consequences=transcript_csqs,
worst_consequence_term=csqs.find(
lambda c: transcript_csqs.map(
lambda csq: csq.most_severe_consequence
).contains(c)
),
worst_csq_by_gene=sorted_scores,
worst_csq_for_variant=hl.or_missing(
hl.len(sorted_scores) > 0, sorted_scores[0]
),
worst_csq_by_gene_canonical=sorted_canonical_scores,
worst_csq_for_variant_canonical=hl.or_missing(
hl.len(sorted_canonical_scores) > 0, sorted_canonical_scores[0]
),
)
return (
mt.annotate_rows(**{vep_root: vep_data})
if isinstance(mt, hl.MatrixTable)
else mt.annotate(**{vep_root: vep_data})
)
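# Hedged usage sketch (illustrative only; `ht` is a hypothetical VEP-annotated Hail Table):
#
#     ht = process_consequences(ht)
#     ht = ht.annotate(
#         worst_term=ht.vep.worst_consequence_term,
#         worst_gene=ht.vep.worst_csq_for_variant.gene_symbol,
#     )
#
# With penalize_flags=True, a LOFTEE-flagged HC transcript is only discounted by
# flag_score (500) while an unflagged HC transcript is discounted by 2 * flag_score,
# so unflagged HC transcripts sort first when picking the worst consequence.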
def filter_vep_to_canonical_transcripts(
mt: Union[hl.MatrixTable, hl.Table], vep_root: str = "vep"
) -> Union[hl.MatrixTable, hl.Table]:
"""Filter VEP transcript consequences to those in the canonical transcript."""
canonical = mt[vep_root].transcript_consequences.filter(
lambda csq: csq.canonical == 1
)
vep_data = mt[vep_root].annotate(transcript_consequences=canonical)
return (
mt.annotate_rows(**{vep_root: vep_data})
if isinstance(mt, hl.MatrixTable)
else mt.annotate(**{vep_root: vep_data})
)
def filter_vep_to_synonymous_variants(
mt: Union[hl.MatrixTable, hl.Table], vep_root: str = "vep"
) -> Union[hl.MatrixTable, hl.Table]:
"""Filter VEP transcript consequences to those with a most severe consequence of synonymous_variant."""
synonymous = mt[vep_root].transcript_consequences.filter(
lambda csq: csq.most_severe_consequence == "synonymous_variant"
)
vep_data = mt[vep_root].annotate(transcript_consequences=synonymous)
return (
mt.annotate_rows(**{vep_root: vep_data})
if isinstance(mt, hl.MatrixTable)
else mt.annotate(**{vep_root: vep_data})
)
def vep_struct_to_csq(
vep_expr: hl.expr.StructExpression, csq_fields: str = VEP_CSQ_FIELDS
) -> hl.expr.ArrayExpression:
"""
    Given a VEP Struct, returns an array of VEP VCF CSQ strings (one per consequence in the struct).
The fields and their order will correspond to those passed in `csq_fields`, which corresponds to the
VCF header that is required to interpret the VCF CSQ INFO field.
Note that the order is flexible and that all fields that are in the default value are supported.
These fields will be formatted in the same way that their VEP CSQ counterparts are.
    Other fields can also be added if their names match those in the struct; their value will be the result of calling
    hl.str(), so it may differ from their usual VEP CSQ representation.
:param vep_expr: The input VEP Struct
:param csq_fields: The | delimited list of fields to include in the CSQ (in that order)
:return: The corresponding CSQ strings
"""
_csq_fields = [f.lower() for f in csq_fields.split("|")]
def get_csq_from_struct(
element: hl.expr.StructExpression, feature_type: str
) -> hl.expr.StringExpression:
# Most fields are 1-1, just lowercase
fields = dict(element)
# Add general exceptions
fields.update(
{
"allele": element.variant_allele,
"consequence": hl.delimit(element.consequence_terms, delimiter="&"),
"feature_type": feature_type,
"feature": (
element.transcript_id
if "transcript_id" in element
else element.regulatory_feature_id
if "regulatory_feature_id" in element
else element.motif_feature_id
if "motif_feature_id" in element
else ""
),
"variant_class": vep_expr.variant_class,
}
)
# Add exception for transcripts
if feature_type == "Transcript":
fields.update(
{
"canonical": hl.cond(element.canonical == 1, "YES", ""),
"ensp": element.protein_id,
"gene": element.gene_id,
"symbol": element.gene_symbol,
"symbol_source": element.gene_symbol_source,
"cdna_position": hl.str(element.cdna_start)
+ hl.cond(
element.cdna_start == element.cdna_end,
"",
"-" + hl.str(element.cdna_end),
),
"cds_position": hl.str(element.cds_start)
+ hl.cond(
element.cds_start == element.cds_end,
"",
"-" + hl.str(element.cds_end),
),
"protein_position": hl.str(element.protein_start)
+ hl.cond(
element.protein_start == element.protein_end,
"",
"-" + hl.str(element.protein_end),
),
"sift": element.sift_prediction
+ "("
+ hl.format("%.3f", element.sift_score)
+ ")",
"polyphen": element.polyphen_prediction
+ "("
+ hl.format("%.3f", element.polyphen_score)
+ ")",
"domains": hl.delimit(
element.domains.map(lambda d: d.db + ":" + d.name), "&"
),
}
)
elif feature_type == "MotifFeature":
fields["motif_score_change"] = hl.format("%.3f", element.motif_score_change)
return hl.delimit(
[hl.or_else(hl.str(fields.get(f, "")), "") for f in _csq_fields], "|"
)
csq = hl.empty_array(hl.tstr)
for feature_field, feature_type in [
("transcript_consequences", "Transcript"),
("regulatory_feature_consequences", "RegulatoryFeature"),
("motif_feature_consequences", "MotifFeature"),
("intergenic_consequences", "Intergenic"),
]:
csq = csq.extend(
hl.or_else(
vep_expr[feature_field].map(
lambda x: get_csq_from_struct(x, feature_type=feature_type)
),
hl.empty_array(hl.tstr),
)
)
return hl.or_missing(hl.len(csq) > 0, csq)
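# Hedged usage sketch (illustrative only; `ht` is a hypothetical VEP-annotated Hail Table):
#
#     csq_expr = vep_struct_to_csq(ht.vep)              # array<str>, one entry per consequence
#     ht = ht.annotate(info=hl.struct(CSQ=csq_expr))
#
# Each element is a "|"-delimited string whose fields follow `csq_fields` exactly,
# ready to be exported as a VCF CSQ INFO field.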
def get_most_severe_consequence_for_summary(
ht: hl.Table,
csq_order: List[str] = CSQ_ORDER,
loftee_labels: List[str] = LOFTEE_LABELS,
) -> hl.Table:
"""
Prepare a hail Table for summary statistics generation.
Adds the following annotations:
- most_severe_csq: Most severe consequence for variant
- protein_coding: Whether the variant is present on a protein-coding transcript
- lof: Whether the variant is a loss-of-function variant
- no_lof_flags: Whether the variant has any LOFTEE flags (True if no flags)
Assumes input Table is annotated with VEP and that VEP annotations have been filtered to canonical transcripts.
:param ht: Input Table.
:param csq_order: Order of VEP consequences, sorted from high to low impact. Default is CSQ_ORDER.
:param loftee_labels: Annotations added by LOFTEE. Default is LOFTEE_LABELS.
:return: Table annotated with VEP summary annotations.
"""
def _get_most_severe_csq(
csq_list: hl.expr.ArrayExpression, protein_coding: bool
) -> hl.expr.StructExpression:
"""
Process VEP consequences to generate summary annotations.
:param csq_list: VEP consequences list to be processed.
:param protein_coding: Whether variant is in a protein-coding transcript.
:return: Struct containing summary annotations.
"""
lof = hl.null(hl.tstr)
no_lof_flags = hl.null(hl.tbool)
if protein_coding:
all_lofs = csq_list.map(lambda x: x.lof)
lof = hl.literal(loftee_labels).find(lambda x: all_lofs.contains(x))
csq_list = hl.if_else(
hl.is_defined(lof), csq_list.filter(lambda x: x.lof == lof), csq_list
)
no_lof_flags = hl.or_missing(
hl.is_defined(lof),
csq_list.any(lambda x: (x.lof == lof) & hl.is_missing(x.lof_flags)),
)
all_csq_terms = csq_list.flatmap(lambda x: x.consequence_terms)
most_severe_csq = hl.literal(csq_order).find(
lambda x: all_csq_terms.contains(x)
)
return hl.struct(
most_severe_csq=most_severe_csq,
protein_coding=protein_coding,
lof=lof,
no_lof_flags=no_lof_flags,
)
protein_coding = ht.vep.transcript_consequences.filter(
lambda x: x.biotype == "protein_coding"
)
return ht.annotate(
**hl.case(missing_false=True)
.when(hl.len(protein_coding) > 0, _get_most_severe_csq(protein_coding, True))
.when(
hl.len(ht.vep.transcript_consequences) > 0,
_get_most_severe_csq(ht.vep.transcript_consequences, False),
)
.when(
hl.len(ht.vep.regulatory_feature_consequences) > 0,
_get_most_severe_csq(ht.vep.regulatory_feature_consequences, False),
)
.when(
hl.len(ht.vep.motif_feature_consequences) > 0,
_get_most_severe_csq(ht.vep.motif_feature_consequences, False),
)
.default(_get_most_severe_csq(ht.vep.intergenic_consequences, False))
)
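# Hedged usage sketch (illustrative only; `ht` is a hypothetical VEP-annotated Hail Table):
#
#     ht = filter_vep_to_canonical_transcripts(ht)
#     ht = get_most_severe_consequence_for_summary(ht)
#     summary = ht.aggregate(hl.agg.counter(ht.most_severe_csq))
#
# The counter gives the number of variants per most severe consequence, which is the
# kind of summary statistic this annotation pass is meant to feed.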
|
[
"hail.default_reference",
"hail.vep",
"logging.getLogger",
"hail.is_missing",
"hail.is_defined",
"hail.eval",
"hail.struct",
"hail.sorted",
"hail.case",
"hail.empty_array",
"subprocess.check_output",
"hail.null",
"json.load",
"hail.hadoop_open",
"logging.basicConfig",
"hail.delimit",
"hail.len",
"hail.literal",
"hail.str",
"hail.format",
"hail.cond"
] |
[((198, 276), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s (%(name)s %(lineno)s): %(message)s"""'}), "(format='%(levelname)s (%(name)s %(lineno)s): %(message)s')\n", (217, 276), False, 'import logging\n'), ((286, 313), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (303, 313), False, 'import logging\n'), ((7600, 7633), 'hail.vep', 'hl.vep', (['revep_ht', 'vep_config_path'], {}), '(revep_ht, vep_config_path)\n', (7606, 7633), True, 'import hail as hl\n'), ((8055, 8076), 'hail.literal', 'hl.literal', (['CSQ_ORDER'], {}), '(CSQ_ORDER)\n', (8065, 8076), True, 'import hail as hl\n'), ((8820, 8841), 'hail.literal', 'hl.literal', (['CSQ_ORDER'], {}), '(CSQ_ORDER)\n', (8830, 8841), True, 'import hail as hl\n'), ((10588, 10642), 'hail.sorted', 'hl.sorted', (['worst_csq_gene'], {'key': '(lambda tc: tc.csq_score)'}), '(worst_csq_gene, key=lambda tc: tc.csq_score)\n', (10597, 10642), True, 'import hail as hl\n'), ((10937, 11001), 'hail.sorted', 'hl.sorted', (['worst_csq_gene_canonical'], {'key': '(lambda tc: tc.csq_score)'}), '(worst_csq_gene_canonical, key=lambda tc: tc.csq_score)\n', (10946, 11001), True, 'import hail as hl\n'), ((16968, 16991), 'hail.empty_array', 'hl.empty_array', (['hl.tstr'], {}), '(hl.tstr)\n', (16982, 16991), True, 'import hail as hl\n'), ((3656, 3687), 'hail.hadoop_open', 'hl.hadoop_open', (['vep_config_path'], {}), '(vep_config_path)\n', (3670, 3687), True, 'import hail as hl\n'), ((3729, 3755), 'json.load', 'json.load', (['vep_config_file'], {}), '(vep_config_file)\n', (3738, 3755), False, 'import json\n'), ((5745, 5776), 'hail.hadoop_open', 'hl.hadoop_open', (['vep_config_path'], {}), '(vep_config_path)\n', (5759, 5776), True, 'import hail as hl\n'), ((6842, 6876), 'hail.eval', 'hl.eval', (['reference_vep_ht.vep_help'], {}), '(reference_vep_ht.vep_help)\n', (6849, 6876), True, 'import hail as hl\n'), ((6906, 6942), 'hail.eval', 'hl.eval', (['reference_vep_ht.vep_config'], {}), '(reference_vep_ht.vep_config)\n', (6913, 6942), True, 'import hail as hl\n'), ((7514, 7535), 'hail.is_defined', 'hl.is_defined', (['ht.vep'], {}), '(ht.vep)\n', (7527, 7535), True, 'import hail as hl\n'), ((7562, 7583), 'hail.is_missing', 'hl.is_missing', (['ht.vep'], {}), '(ht.vep)\n', (7575, 7583), True, 'import hail as hl\n'), ((18974, 18990), 'hail.null', 'hl.null', (['hl.tstr'], {}), '(hl.tstr)\n', (18981, 18990), True, 'import hail as hl\n'), ((19014, 19031), 'hail.null', 'hl.null', (['hl.tbool'], {}), '(hl.tbool)\n', (19021, 19031), True, 'import hail as hl\n'), ((19704, 19817), 'hail.struct', 'hl.struct', ([], {'most_severe_csq': 'most_severe_csq', 'protein_coding': 'protein_coding', 'lof': 'lof', 'no_lof_flags': 'no_lof_flags'}), '(most_severe_csq=most_severe_csq, protein_coding=protein_coding,\n lof=lof, no_lof_flags=no_lof_flags)\n', (19713, 19817), True, 'import hail as hl\n'), ((4312, 4334), 'hail.default_reference', 'hl.default_reference', ([], {}), '()\n', (4332, 4334), True, 'import hail as hl\n'), ((5586, 5608), 'hail.default_reference', 'hl.default_reference', ([], {}), '()\n', (5606, 5608), True, 'import hail as hl\n'), ((6559, 6586), 'hail.vep', 'hl.vep', (['ht', 'vep_config_path'], {}), '(ht, vep_config_path)\n', (6565, 6586), True, 'import hail as hl\n'), ((17549, 17560), 'hail.len', 'hl.len', (['csq'], {}), '(csq)\n', (17555, 17560), True, 'import hail as hl\n'), ((3819, 3860), 'subprocess.check_output', 'subprocess.check_output', (['[vep_command[0]]'], {}), '([vep_command[0]])\n', (3842, 3860), False, 'import 
subprocess\n'), ((10232, 10243), 'hail.len', 'hl.len', (['tcl'], {}), '(tcl)\n', (10238, 10243), True, 'import hail as hl\n'), ((10249, 10286), 'hail.sorted', 'hl.sorted', (['tcl', '(lambda x: x.csq_score)'], {}), '(tcl, lambda x: x.csq_score)\n', (10258, 10286), True, 'import hail as hl\n'), ((14397, 14449), 'hail.delimit', 'hl.delimit', (['element.consequence_terms'], {'delimiter': '"""&"""'}), "(element.consequence_terms, delimiter='&')\n", (14407, 14449), True, 'import hail as hl\n'), ((16791, 16836), 'hail.format', 'hl.format', (['"""%.3f"""', 'element.motif_score_change'], {}), "('%.3f', element.motif_score_change)\n", (16800, 16836), True, 'import hail as hl\n'), ((17474, 17497), 'hail.empty_array', 'hl.empty_array', (['hl.tstr'], {}), '(hl.tstr)\n', (17488, 17497), True, 'import hail as hl\n'), ((19244, 19262), 'hail.is_defined', 'hl.is_defined', (['lof'], {}), '(lof)\n', (19257, 19262), True, 'import hail as hl\n'), ((19386, 19404), 'hail.is_defined', 'hl.is_defined', (['lof'], {}), '(lof)\n', (19399, 19404), True, 'import hail as hl\n'), ((19603, 19624), 'hail.literal', 'hl.literal', (['csq_order'], {}), '(csq_order)\n', (19613, 19624), True, 'import hail as hl\n'), ((11380, 11401), 'hail.len', 'hl.len', (['sorted_scores'], {}), '(sorted_scores)\n', (11386, 11401), True, 'import hail as hl\n'), ((11563, 11594), 'hail.len', 'hl.len', (['sorted_canonical_scores'], {}), '(sorted_canonical_scores)\n', (11569, 11594), True, 'import hail as hl\n'), ((15122, 15164), 'hail.cond', 'hl.cond', (['(element.canonical == 1)', '"""YES"""', '""""""'], {}), "(element.canonical == 1, 'YES', '')\n", (15129, 15164), True, 'import hail as hl\n'), ((19130, 19155), 'hail.literal', 'hl.literal', (['loftee_labels'], {}), '(loftee_labels)\n', (19140, 19155), True, 'import hail as hl\n'), ((15412, 15438), 'hail.str', 'hl.str', (['element.cdna_start'], {}), '(element.cdna_start)\n', (15418, 15438), True, 'import hail as hl\n'), ((15677, 15702), 'hail.str', 'hl.str', (['element.cds_start'], {}), '(element.cds_start)\n', (15683, 15702), True, 'import hail as hl\n'), ((15942, 15971), 'hail.str', 'hl.str', (['element.protein_start'], {}), '(element.protein_start)\n', (15948, 15971), True, 'import hail as hl\n'), ((16283, 16320), 'hail.format', 'hl.format', (['"""%.3f"""', 'element.sift_score'], {}), "('%.3f', element.sift_score)\n", (16292, 16320), True, 'import hail as hl\n'), ((16456, 16497), 'hail.format', 'hl.format', (['"""%.3f"""', 'element.polyphen_score'], {}), "('%.3f', element.polyphen_score)\n", (16465, 16497), True, 'import hail as hl\n'), ((19462, 19488), 'hail.is_missing', 'hl.is_missing', (['x.lof_flags'], {}), '(x.lof_flags)\n', (19475, 19488), True, 'import hail as hl\n'), ((20487, 20528), 'hail.len', 'hl.len', (['ht.vep.motif_feature_consequences'], {}), '(ht.vep.motif_feature_consequences)\n', (20493, 20528), True, 'import hail as hl\n'), ((15592, 15616), 'hail.str', 'hl.str', (['element.cdna_end'], {}), '(element.cdna_end)\n', (15598, 15616), True, 'import hail as hl\n'), ((15854, 15877), 'hail.str', 'hl.str', (['element.cds_end'], {}), '(element.cds_end)\n', (15860, 15877), True, 'import hail as hl\n'), ((16131, 16158), 'hail.str', 'hl.str', (['element.protein_end'], {}), '(element.protein_end)\n', (16137, 16158), True, 'import hail as hl\n'), ((20317, 20363), 'hail.len', 'hl.len', (['ht.vep.regulatory_feature_consequences'], {}), '(ht.vep.regulatory_feature_consequences)\n', (20323, 20363), True, 'import hail as hl\n'), ((20163, 20201), 'hail.len', 'hl.len', 
(['ht.vep.transcript_consequences'], {}), '(ht.vep.transcript_consequences)\n', (20169, 20201), True, 'import hail as hl\n'), ((20022, 20049), 'hail.case', 'hl.case', ([], {'missing_false': '(True)'}), '(missing_false=True)\n', (20029, 20049), True, 'import hail as hl\n'), ((20064, 20086), 'hail.len', 'hl.len', (['protein_coding'], {}), '(protein_coding)\n', (20070, 20086), True, 'import hail as hl\n'), ((9377, 9404), 'hail.case', 'hl.case', ([], {'missing_false': '(True)'}), '(missing_false=True)\n', (9384, 9404), True, 'import hail as hl\n')]
|
import os
import json
import random
import re
from datetime import datetime
from discord.ext import commands, tasks
from discord_slash import cog_ext, SlashContext
########
# PICKING A RANDOM PRODUCT FROM THE SCRAPED LIST!
########
def item_pick():
data = []
locations = {
0: "https://www.amazon.com"
}
try:
for filename in sorted(os.listdir('./scraper/crawler')):
if filename.endswith('.json'):
with open(f'scraper/crawler/{filename}') as file:
data.append(json.load(file))
except ValueError:
print("[WARNING] JSON file missing. Is a manual scrape in progress?")
#dataType = random.randint(0, (len(data) - 1)) #picking a web domain
chosenData = random.choice(data[0]) #picking a random location within said web domain
#some page scrapes return empty "url" definitions
while not chosenData.get("url"):
print("[DEBUG] Chosen listing is an empty URL. Choosing again. ")
        chosenData = random.choice(data[0])  # re-pick from the same domain list ("dataType" is commented out above)
print("[DEBUG] Valid listing chosen.")
#grabbing the URL and splicing it with the domain if necessary
chosenProduct = random.choice(chosenData.get("url"))
resPart = "https://www.amazon.com" if chosenProduct.startswith("/") else ""
result = resPart + chosenProduct
return result
########
# Writing to JSON
########
def write_file(data):
with open('recSettings.json', 'w', encoding='utf-8') as output:
json.dump(data, output, ensure_ascii=False, indent=4)
########
# Checking if the command issuer is the bot owner
########
async def check_owner(ctx):
ownerID = int(os.getenv('OWNER_ID'))
if ctx.author.id != ownerID:
await ctx.send('{}, you don\'t have permission to use this command.'.format(ctx.message.author.mention))
return 1
return 0
#######
# Appending new server data
######
def new_server(settings, guildID):
messageDict = settings.get('recommend')
if guildID not in messageDict.get('currentCount'):
print("[DEBUG]: New Server. Adding entries to JSON.")
messageDict.get('maxMessage')[guildID] = 9
messageDict.get('currentCount')[guildID] = 0
messageDict.get('lastScrape')[guildID] = str(datetime.now())
messageDict.get('interject')[guildID] = True
write_file(settings)
# I should probably move the above functions for form, but I'm too lazy to rn
##################################################################
##################################################################
### ###
### THE ACTUAL COG CLASS ###
### ###
##################################################################
##################################################################
class Recommend(commands.Cog):
def __init__(self, bot):
self.bot = bot
#loading server settings from JSON
self.settings = {}
if os.path.isfile('recSettings.json') and os.access('recSettings.json', os.R_OK):
with open('recSettings.json') as file:
self.settings = (json.load(file))
else:
print("[INFO]: JSON file missing or unreadable. Creating new JSON.")
masterServer = os.getenv('MASTER_SERVER_ID')
self.settings ={
'recommend': {
'maxMessage': {
masterServer: 24
},
'currentCount': {
masterServer: 0
},
'lastScrape': {
masterServer: str(datetime.now())
},
'interject': {
masterServer: True
}
}
}
write_file(self.settings)
#self.auto_crawl.start() #initiate auto_crawl background task
print("[DEBUG] Recommendation Cog loaded")
########
# BACKGROUND EVENT: RUN THE SPIDER EVERY MONTH FROM THE LAST CRAWL
########
@tasks.loop(minutes=60.0)
async def auto_crawl(self):
#Check elapsed time from last scrape in master server
masterServer = os.getenv('MASTER_SERVER_ID')
oldTime = self.settings.get("recommend").get("lastScrape").get(masterServer)
oldStrp = datetime.strptime(oldTime, "%Y-%m-%d %H:%M:%S.%f")
timeDelta = (datetime.now() - oldStrp).days
if (timeDelta < 30): #30 days
return
self.settings.get("recommend").get("lastScrape")[masterServer] = str(datetime.now())
write_file(self.settings)
print("[SYSTEM] Month since last scrape. Initiating Re-Scrape.")
os.system('scraper/runSpider.sh')
########
# initiateCrawl: FORCE THE BOT TO RUN THE SPIDER
########
@commands.command(name="initiateCrawl", description="Forces a crawl for updated product listings.", aliases=['ic'], hidden=True)
async def force_crawl(self, ctx):
if await check_owner(ctx) == 1:
return
self.settings.get("recommend").get("lastScrape")[str(ctx.message.guild.id)] = str(datetime.now())
write_file(self.settings)
print("[SYSTEM] Web Crawl forcefully initiated.")
os.system('scraper/runSpider.sh')
########
# messageGap: HOW LONG UNTIL THE THING INTERJECTS
########
@commands.command(name="messageGap", description="How many messages until a product plug? DEF.: 25", aliases=['mg'])
async def interject_cd(self, ctx, num):
new_server(self.settings, str(ctx.message.guild.id))
try:
if isinstance(int(num), int):
self.settings.get("recommend").get("maxMessage")[str(ctx.message.guild.id)] = abs(int(num)) - 1
write_file(self.settings)
print("[UPDATE] Interjection Interval has been changed >> " + '{}'.format(abs(int(num))))
await ctx.send("Interjection interval updated.")
except ValueError:
print("[DEBUG] Interjection interval change attempt made. Invalid arguement given.")
await ctx.send("Provided arguement is not an integer. Please try again and provide an integer arguement.")
@interject_cd.error
async def interject_cd_error(self, ctx, error):
await ctx.send("Invalid arguments given. Refer to +help for more information.")
########
# recommend: GENERATE A RECOMMENDATION
########
@commands.command(name="recommend", description="Get a random recommendation.", aliases=['rec', 'r'])
async def item_recommend_c(self, ctx):
new_server(self.settings, str(ctx.message.guild.id))
productURL = item_pick()
await ctx.send("Here's a product you may enjoy: " + productURL)
@cog_ext.cog_slash(name="recommend", description="Get a random recommendation.")
async def item_recommend(self, ctx: SlashContext):
productURL = item_pick()
await ctx.send("Here's a product you may enjoy: " + productURL)
########
# interjectToggle: DISABLE INTERJECTIONS
########
@commands.command(name="interjectToggle", description="Disables bot interjections", aliases=['id'])
async def interject_toggle(self, ctx):
stringID = str(ctx.message.guild.id)
interject = self.settings.get("recommend")
new_server(self.settings, stringID)
interject.get('interject')[stringID] = False if interject.get('interject')[stringID] else True
write_file(self.settings)
await ctx.send("Interjections enabled." if interject.get("interject")[stringID] else "Interjections disabled.")
########
# ON LISTENER FOR MESSAGES
########
@commands.Cog.listener()
async def on_message(self, message):
if message.author == self.bot.user:
return
messageDict = self.settings.get("recommend")
guildID = str(message.guild.id)
new_server(self.settings, guildID)
"""
if guildID not in messageDict.get("currentCount"):
print("[DEBUG]: New server, adding it to settings file.");
messageDict.get('maxMessage')[guildID] = 9
messageDict.get('currentCount')[guildID] = 0
messageDict.get('lastScrape')[guildID] = str(datetime.now())
"""
currentCount = messageDict.get("currentCount").get(guildID)
curCountMut = messageDict.get("currentCount")
maxMessage = messageDict.get("maxMessage").get(guildID)
        if maxMessage <= 0 or not messageDict.get('interject').get(guildID):  # skip when interjections are disabled for this guild
return
elif currentCount >= maxMessage:
print("[STATUS] Message count cap reached. Making interjection.")
curCountMut[guildID] = 0
words = re.findall(r'\b\w+\b', message.content)
productURL = item_pick()
try:
await message.channel.send(
"I'm terribly sorry to interrupt you right now, but I couldn't help myself from interjecting. " +
"That being said, I happened to overhear that you've mentioned: [" + random.choice(words) + "]! " +
"And we just so happen to have something you may be very interested in as a result! Behold: " +
productURL
)
except IndexError:
await message.channel.send(
"I'm terribly sorry to interrupt you right now, but I couldn't help myself from interjecting. " +
"That being said, I happened to overhear you just then. And right now, we happen to have something " +
"in stock that you may be interested in! Behold: " + productURL
)
write_file(self.settings)
else:
curCountMut[guildID] += 1
write_file(self.settings)
def setup(bot):
bot.add_cog(Recommend(bot))
|
[
"os.listdir",
"json.dump",
"json.load",
"discord.ext.commands.command",
"random.choice",
"os.system",
"discord.ext.commands.Cog.listener",
"datetime.datetime.strptime",
"discord.ext.tasks.loop",
"discord_slash.cog_ext.cog_slash",
"os.path.isfile",
"re.findall",
"os.access",
"datetime.datetime.now",
"os.getenv"
] |
[((747, 769), 'random.choice', 'random.choice', (['data[0]'], {}), '(data[0])\n', (760, 769), False, 'import random\n'), ((4076, 4100), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'minutes': '(60.0)'}), '(minutes=60.0)\n', (4086, 4100), False, 'from discord.ext import commands, tasks\n'), ((4840, 4977), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""initiateCrawl"""', 'description': '"""Forces a crawl for updated product listings."""', 'aliases': "['ic']", 'hidden': '(True)'}), "(name='initiateCrawl', description=\n 'Forces a crawl for updated product listings.', aliases=['ic'], hidden=True\n )\n", (4856, 4977), False, 'from discord.ext import commands, tasks\n'), ((5392, 5512), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""messageGap"""', 'description': '"""How many messages until a product plug? DEF.: 25"""', 'aliases': "['mg']"}), "(name='messageGap', description=\n 'How many messages until a product plug? DEF.: 25', aliases=['mg'])\n", (5408, 5512), False, 'from discord.ext import commands, tasks\n'), ((6481, 6586), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""recommend"""', 'description': '"""Get a random recommendation."""', 'aliases': "['rec', 'r']"}), "(name='recommend', description=\n 'Get a random recommendation.', aliases=['rec', 'r'])\n", (6497, 6586), False, 'from discord.ext import commands, tasks\n'), ((6800, 6879), 'discord_slash.cog_ext.cog_slash', 'cog_ext.cog_slash', ([], {'name': '"""recommend"""', 'description': '"""Get a random recommendation."""'}), "(name='recommend', description='Get a random recommendation.')\n", (6817, 6879), False, 'from discord_slash import cog_ext, SlashContext\n'), ((7117, 7220), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""interjectToggle"""', 'description': '"""Disables bot interjections"""', 'aliases': "['id']"}), "(name='interjectToggle', description=\n 'Disables bot interjections', aliases=['id'])\n", (7133, 7220), False, 'from discord.ext import commands, tasks\n'), ((7721, 7744), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (7742, 7744), False, 'from discord.ext import commands, tasks\n'), ((1007, 1036), 'random.choice', 'random.choice', (['data[dataType]'], {}), '(data[dataType])\n', (1020, 1036), False, 'import random\n'), ((1479, 1532), 'json.dump', 'json.dump', (['data', 'output'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(data, output, ensure_ascii=False, indent=4)\n', (1488, 1532), False, 'import json\n'), ((1648, 1669), 'os.getenv', 'os.getenv', (['"""OWNER_ID"""'], {}), "('OWNER_ID')\n", (1657, 1669), False, 'import os\n'), ((4218, 4247), 'os.getenv', 'os.getenv', (['"""MASTER_SERVER_ID"""'], {}), "('MASTER_SERVER_ID')\n", (4227, 4247), False, 'import os\n'), ((4351, 4401), 'datetime.datetime.strptime', 'datetime.strptime', (['oldTime', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(oldTime, '%Y-%m-%d %H:%M:%S.%f')\n", (4368, 4401), False, 'from datetime import datetime\n'), ((4721, 4754), 'os.system', 'os.system', (['"""scraper/runSpider.sh"""'], {}), "('scraper/runSpider.sh')\n", (4730, 4754), False, 'import os\n'), ((5272, 5305), 'os.system', 'os.system', (['"""scraper/runSpider.sh"""'], {}), "('scraper/runSpider.sh')\n", (5281, 5305), False, 'import os\n'), ((361, 392), 'os.listdir', 'os.listdir', (['"""./scraper/crawler"""'], {}), "('./scraper/crawler')\n", (371, 392), False, 'import os\n'), ((2246, 2260), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2258, 2260), False, 'from 
datetime import datetime\n'), ((2969, 3003), 'os.path.isfile', 'os.path.isfile', (['"""recSettings.json"""'], {}), "('recSettings.json')\n", (2983, 3003), False, 'import os\n'), ((3008, 3046), 'os.access', 'os.access', (['"""recSettings.json"""', 'os.R_OK'], {}), "('recSettings.json', os.R_OK)\n", (3017, 3046), False, 'import os\n'), ((3272, 3301), 'os.getenv', 'os.getenv', (['"""MASTER_SERVER_ID"""'], {}), "('MASTER_SERVER_ID')\n", (3281, 3301), False, 'import os\n'), ((4589, 4603), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4601, 4603), False, 'from datetime import datetime\n'), ((5156, 5170), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5168, 5170), False, 'from datetime import datetime\n'), ((3132, 3147), 'json.load', 'json.load', (['file'], {}), '(file)\n', (3141, 3147), False, 'import json\n'), ((4423, 4437), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4435, 4437), False, 'from datetime import datetime\n'), ((8792, 8833), 're.findall', 're.findall', (['"""\\\\b\\\\w+\\\\b"""', 'message.content'], {}), "('\\\\b\\\\w+\\\\b', message.content)\n", (8802, 8833), False, 'import re\n'), ((536, 551), 'json.load', 'json.load', (['file'], {}), '(file)\n', (545, 551), False, 'import json\n'), ((3641, 3655), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3653, 3655), False, 'from datetime import datetime\n'), ((9139, 9159), 'random.choice', 'random.choice', (['words'], {}), '(words)\n', (9152, 9159), False, 'import random\n')]
|
# coding: utf-8
# In[1]:
from cltk.tag.pos.pos_tagger import POSTag
from cltk.tokenize.sentence.tokenize_sentences import TokenizeSentence
import os
import re
# In[2]:
def extract_tlg_work(file_path, regex_match):
abs_path = os.path.expanduser(file_path)
with open(abs_path) as f:
r = f.read()
d = re.compile(regex_match)
m = d.findall(r)
for x in m:
work_str = x[1]
return work_str
# In[3]:
anabasis_path = '~/cltk_data/compiled/tlg/TLG0032.txt'
anabasis_regex = r'(@1 \{1ΚΥΡΟΥ ΑΝΑΒΑΣΕΩΣ Α\}1 @)(.*)( @1 \{1ΚΥΡΟΥ ΠΑΙΔΕΙΑΣ Α\}1 @)'
anabasis_raw = extract_tlg_work(anabasis_path, anabasis_regex)
# In[4]:
def cleanup_tlg_txt(tlg_str):
# fix beta code transliteration problems
tlg_str = re.sub(r'ι\+', 'ϊ', tlg_str)
tlg_str = re.sub(r'ί\+', 'ΐ', tlg_str)
tlg_str = re.sub(r'\\.', '.', tlg_str)
# fix tlg markup
tlg_str = re.sub(r'@1 \{1.+?\}1 @', '', tlg_str) # rm book titles
tlg_str = re.sub(r'\[.+?\]', '', tlg_str) # rm words in square brackets
tlg_str = re.sub(r'[0-9]', '', tlg_str)
tlg_str = re.sub(r'@|%|\x00', '', tlg_str)
tlg_str = re.sub('—', ' — ', tlg_str)
return tlg_str
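# Hedged before/after sketch (illustrative only; the Greek snippet is made up):
#
#     raw = '@1 {1ΚΥΡΟΥ ΑΝΑΒΑΣΕΩΣ Α}1 @ Δαρείου καὶ Παρυσάτιδος [γίγνονται] παῖδες δύο'
#     cleanup_tlg_txt(raw)
#     # -> ' Δαρείου καὶ Παρυσάτιδος  παῖδες δύο'  (roughly: title, brackets and digits stripped)
#
# The beta-code fixes (ι+ -> ϊ, etc.) apply the same way when those sequences occur.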
# In[5]:
anabasis_clean = cleanup_tlg_txt(anabasis_raw)
# In[6]:
def tokenize_sentences(in_str):
"""tokenize into list of sentences"""
t = TokenizeSentence()
out_list = t.sentence_tokenizer(in_str, 'greek')
return out_list
# In[7]:
anabasis_sentences = tokenize_sentences(anabasis_clean)
# In[8]:
def append_to_file(file_name, pos_str):
user_data = os.path.expanduser('~/cltk_data/user_data/')
if not os.path.isdir(user_data):
os.makedirs(user_data)
file_name = str('pos_editable_') + str(file_name) + str('.md')
file_path = os.path.join(user_data, file_name)
with open(file_path, 'a') as f:
f.write(pos_str)
# In[9]:
def editable_pos_text(untagged_sentences):
"""POS tag each sentence and print text."""
p = POSTag()
counter = 0
for sentence in untagged_sentences:
counter += 1
tagged_words = p.tnt_tagger(sentence, 'greek') # ~ 6 sec. per sent
tags_newlines = ''
unknowns = [] # mk list of untagged words
for tagged_word in tagged_words:
line = str(tagged_word) + '\n'
tags_newlines = tags_newlines + line
if tagged_word[1] == 'Unk':
unknowns.append(tagged_word[0])
# print str of human-readable sentence
sent_str_out = """## Sentence %s
### Plaintext
%s
### Tagged
```
%s```
### Unknown words
%s
### Corrected by
['']
""" % (counter, sentence, tags_newlines, unknowns)
append_to_file('xenophon_anabasis', sent_str_out)
# In[10]:
editable_pos_text(anabasis_sentences)
|
[
"os.path.join",
"os.makedirs",
"os.path.isdir",
"cltk.tag.pos.pos_tagger.POSTag",
"cltk.tokenize.sentence.tokenize_sentences.TokenizeSentence",
"os.path.expanduser",
"re.sub",
"re.compile"
] |
[((235, 264), 'os.path.expanduser', 'os.path.expanduser', (['file_path'], {}), '(file_path)\n', (253, 264), False, 'import os\n'), ((324, 347), 're.compile', 're.compile', (['regex_match'], {}), '(regex_match)\n', (334, 347), False, 'import re\n'), ((745, 773), 're.sub', 're.sub', (['"""ι\\\\+"""', '"""ϊ"""', 'tlg_str'], {}), "('ι\\\\+', 'ϊ', tlg_str)\n", (751, 773), False, 'import re\n'), ((788, 816), 're.sub', 're.sub', (['"""ί\\\\+"""', '"""ΐ"""', 'tlg_str'], {}), "('ί\\\\+', 'ΐ', tlg_str)\n", (794, 816), False, 'import re\n'), ((831, 860), 're.sub', 're.sub', (['"""\\\\\\\\."""', '"""."""', 'tlg_str'], {}), "('\\\\\\\\.', '.', tlg_str)\n", (837, 860), False, 'import re\n'), ((895, 934), 're.sub', 're.sub', (['"""@1 \\\\{1.+?\\\\}1 @"""', '""""""', 'tlg_str'], {}), "('@1 \\\\{1.+?\\\\}1 @', '', tlg_str)\n", (901, 934), False, 'import re\n'), ((966, 998), 're.sub', 're.sub', (['"""\\\\[.+?\\\\]"""', '""""""', 'tlg_str'], {}), "('\\\\[.+?\\\\]', '', tlg_str)\n", (972, 998), False, 'import re\n'), ((1043, 1071), 're.sub', 're.sub', (['"""[0-9]"""', '""""""', 'tlg_str'], {}), "('[0-9]', '', tlg_str)\n", (1049, 1071), False, 'import re\n'), ((1087, 1119), 're.sub', 're.sub', (['"""@|%|\\\\x00"""', '""""""', 'tlg_str'], {}), "('@|%|\\\\x00', '', tlg_str)\n", (1093, 1119), False, 'import re\n'), ((1134, 1161), 're.sub', 're.sub', (['"""—"""', '""" — """', 'tlg_str'], {}), "('—', ' — ', tlg_str)\n", (1140, 1161), False, 'import re\n'), ((1334, 1352), 'cltk.tokenize.sentence.tokenize_sentences.TokenizeSentence', 'TokenizeSentence', ([], {}), '()\n', (1350, 1352), False, 'from cltk.tokenize.sentence.tokenize_sentences import TokenizeSentence\n'), ((1562, 1606), 'os.path.expanduser', 'os.path.expanduser', (['"""~/cltk_data/user_data/"""'], {}), "('~/cltk_data/user_data/')\n", (1580, 1606), False, 'import os\n'), ((1758, 1792), 'os.path.join', 'os.path.join', (['user_data', 'file_name'], {}), '(user_data, file_name)\n', (1770, 1792), False, 'import os\n'), ((1965, 1973), 'cltk.tag.pos.pos_tagger.POSTag', 'POSTag', ([], {}), '()\n', (1971, 1973), False, 'from cltk.tag.pos.pos_tagger import POSTag\n'), ((1618, 1642), 'os.path.isdir', 'os.path.isdir', (['user_data'], {}), '(user_data)\n', (1631, 1642), False, 'import os\n'), ((1652, 1674), 'os.makedirs', 'os.makedirs', (['user_data'], {}), '(user_data)\n', (1663, 1674), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('comm', '0019_customtextconfig_use_in_report'),
]
operations = [
migrations.AlterModelOptions(
name='service',
options={'ordering': ['-display_prioirity'], 'verbose_name': 'pos\u0142uga', 'verbose_name_plural': 'pos\u0142ugi'},
),
migrations.AddField(
model_name='service',
name='display_prioirity',
field=models.IntegerField(default=0, help_text='pos\u0142ugi w tabelce obecno\u015bci s\u0105 uporz\u0105dkowane wed\u0142ugmalej\u0105cego priorytetu', verbose_name='priorytet w tabelce'),
preserve_default=True,
),
migrations.AlterField(
model_name='customtextconfig',
name='obligatory',
field=models.BooleanField(default=False, help_text='Punkty obowi\u0105zkowe pojawi\u0105 si\u0119 w ka\u017cdym sprawozdaniu.', verbose_name='obowi\u0105zkowy'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='attendance',
unique_together=set([('report', 'diocese', 'service')]),
),
]
|
[
"django.db.models.IntegerField",
"django.db.migrations.AlterModelOptions",
"django.db.models.BooleanField"
] |
[((260, 420), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""service"""', 'options': "{'ordering': ['-display_prioirity'], 'verbose_name': 'posługa',\n 'verbose_name_plural': 'posługi'}"}), "(name='service', options={'ordering': [\n '-display_prioirity'], 'verbose_name': 'posługa', 'verbose_name_plural':\n 'posługi'})\n", (288, 420), False, 'from django.db import models, migrations\n'), ((577, 738), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'help_text': '"""posługi w tabelce obecności są uporządkowane wedługmalejącego priorytetu"""', 'verbose_name': '"""priorytet w tabelce"""'}), "(default=0, help_text=\n 'posługi w tabelce obecności są uporządkowane wedługmalejącego priorytetu',\n verbose_name='priorytet w tabelce')\n", (596, 738), False, 'from django.db import models, migrations\n'), ((930, 1069), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Punkty obowiązkowe pojawią się w każdym sprawozdaniu."""', 'verbose_name': '"""obowiązkowy"""'}), "(default=False, help_text=\n 'Punkty obowiązkowe pojawią się w każdym sprawozdaniu.', verbose_name=\n 'obowiązkowy')\n", (949, 1069), False, 'from django.db import models, migrations\n')]
|
#!/usr/bin/env python
import os
import sys
import site
ROOT = os.path.dirname(os.path.abspath(__file__))
path = lambda *a: os.path.join(ROOT, *a)
prev_sys_path = list(sys.path)
site.addsitedir(path('vendor'))
site.addsitedir(path('vendor/lib/python'))
site.addsitedir(path('apps'))
site.addsitedir(path('lib'))
site.addsitedir(path('lib/addon-sdk-0.9/python-lib')) # weak sauce
# Move the new items to the front of sys.path. (via virtualenv)
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
# No third-party imports until we've added all our sitedirs!
from django.core.management import execute_manager, setup_environ
try:
import settings_local as settings
except ImportError:
try:
import settings
except ImportError:
import sys
sys.stderr.write(
"Error: Tried importing 'settings_local.py' and 'settings.py' "
"but neither could be found (or they're throwing an ImportError)."
" Please come back and try again later.")
raise
if settings.PRODUCTION:
for app in settings.DEV_APPS:
if app in settings.INSTALLED_APPS:
settings.INSTALLED_APPS.remove(app)
for middleware in settings.DEV_MIDDLEWARE_CLASSES:
if middleware in settings.MIDDLEWARE_CLASSES:
settings.MIDDLEWARE_CLASSES.remove(middleware)
# The first thing execute_manager does is call `setup_environ`. Logging config
# needs to access settings, so we'll setup the environ early.
setup_environ(settings)
# Import for side-effect: configures our logging handlers.
# pylint: disable-msg=W0611
import log_settings
if __name__ == "__main__":
execute_manager(settings)
|
[
"os.path.abspath",
"sys.path.remove",
"django.core.management.execute_manager",
"settings.MIDDLEWARE_CLASSES.remove",
"django.core.management.setup_environ",
"settings.INSTALLED_APPS.remove",
"sys.stderr.write",
"os.path.join"
] |
[((1599, 1622), 'django.core.management.setup_environ', 'setup_environ', (['settings'], {}), '(settings)\n', (1612, 1622), False, 'from django.core.management import execute_manager, setup_environ\n'), ((80, 105), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (95, 105), False, 'import os\n'), ((125, 147), 'os.path.join', 'os.path.join', (['ROOT', '*a'], {}), '(ROOT, *a)\n', (137, 147), False, 'import os\n'), ((1763, 1788), 'django.core.management.execute_manager', 'execute_manager', (['settings'], {}), '(settings)\n', (1778, 1788), False, 'from django.core.management import execute_manager, setup_environ\n'), ((570, 591), 'sys.path.remove', 'sys.path.remove', (['item'], {}), '(item)\n', (585, 591), False, 'import sys\n'), ((1251, 1286), 'settings.INSTALLED_APPS.remove', 'settings.INSTALLED_APPS.remove', (['app'], {}), '(app)\n', (1281, 1286), False, 'import settings\n'), ((1409, 1455), 'settings.MIDDLEWARE_CLASSES.remove', 'settings.MIDDLEWARE_CLASSES.remove', (['middleware'], {}), '(middleware)\n', (1443, 1455), False, 'import settings\n'), ((896, 1089), 'sys.stderr.write', 'sys.stderr.write', (['"""Error: Tried importing \'settings_local.py\' and \'settings.py\' but neither could be found (or they\'re throwing an ImportError). Please come back and try again later."""'], {}), '(\n "Error: Tried importing \'settings_local.py\' and \'settings.py\' but neither could be found (or they\'re throwing an ImportError). Please come back and try again later."\n )\n', (912, 1089), False, 'import sys\n')]
|
import resources as res
import numpy as np
import nltk
class Feature(object):
dataset = None
def __init__(self, dataset):
self.dataset = dataset
def run(self):
array = []
for text in self.dataset:
bigrams = 0
counter = 0
words = nltk.word_tokenize(text)
if len(words) < 3:
array.append(0.0)
continue
for i in range(0, len(words) - 1):
counter+=1
if words[i].lower() in res.__bigrams__ and words[i + 1].lower() in res.__bigrams__[words[i].lower()]:
bigrams += 1
if counter == 0:
array.append(0.0)
else:
                array.append(float(bigrams) / counter)  # cast before dividing so Python 2 integer division does not truncate
return np.matrix(array)
|
[
"numpy.matrix",
"nltk.word_tokenize"
] |
[((800, 816), 'numpy.matrix', 'np.matrix', (['array'], {}), '(array)\n', (809, 816), True, 'import numpy as np\n'), ((307, 331), 'nltk.word_tokenize', 'nltk.word_tokenize', (['text'], {}), '(text)\n', (325, 331), False, 'import nltk\n')]
|
from __future__ import absolute_import
import numpy
import orange, statc
from . import stats
def mean(l):
return float(sum(l))/len(l)
class MA_pearsonCorrelation:
"""
Calling an object of this class computes Pearson correlation of all
attributes against class.
"""
def __call__(self, i, data):
dom2 = orange.Domain([data.domain.attributes[i]], data.domain.classVar)
data2 = orange.ExampleTable(dom2, data)
a,c = data2.toNumpy("A/C")
return numpy.corrcoef(c,a[:,0])[0,1]
class MA_signalToNoise:
"""
Returns signal to noise measurement: difference of means of two classes
divided by the sum of standard deviations for both classes.
    Usage similar to MeasureAttribute*.
    The standard deviation used here is clamped to a minimum of 0.2*|mi|, where mi=0 is adjusted to mi=1
    (as in the GSEA implementation).
Can work only on data with two classes. If there are multiple class, then
relevant class values can be specified on object initialization.
By default the relevant classes are first and second class value
from the domain.
"""
def __init__(self, a=None, b=None):
"""
        a and b are the chosen class values.
"""
self.a = a
self.b = b
def __call__(self, i, data):
cv = data.domain.classVar
#print data.domain
if self.a == None: self.a = cv.values[0]
if self.b == None: self.b = cv.values[1]
def stdev(l):
return statc.std(l)
def mean(l):
return statc.mean(l)
def stdevm(l):
m = mean(l)
std = stdev(l)
            # return minimally 0.2*|mi|, where mi=0 is adjusted to mi=1
return max(std, 0.2*abs(1.0 if m == 0 else m))
def avWCVal(value):
return [ex[i].value for ex in data if ex[-1].value == value and not ex[i].isSpecial() ]
exa = avWCVal(self.a)
exb = avWCVal(self.b)
try:
rval = (mean(exa)-mean(exb))/(stdevm(exa)+stdevm(exb))
return rval
except:
#return some "middle" value -
#TODO rather throw exception?
return 0
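# Hedged worked example (illustrative numbers, not from the source) for the score above:
#
#     class a expression values: [2.0, 2.2, 1.8]  -> mean 2.0, sample stdev 0.2
#     class b expression values: [1.0, 1.1, 0.9]  -> mean 1.0, sample stdev 0.1
#     stdevm clamps each stdev to at least 0.2*|mean|, giving 0.4 and 0.2 here,
#     so the returned score is (2.0 - 1.0) / (0.4 + 0.2) ~= 1.67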
class MA_t_test(object):
def __init__(self, a=None, b=None, prob=False):
self.a = a
self.b = b
self.prob = prob
def __call__(self, i, data):
cv = data.domain.classVar
#print data.domain
#for faster computation. to save dragging many attributes along
dom2 = orange.Domain([data.domain[i]], data.domain.classVar)
data = orange.ExampleTable(dom2, data)
i = 0
if self.a == None: self.a = cv.values[0]
if self.b == None: self.b = cv.values[1]
def avWCVal(value):
return [ex[i].value for ex in data if ex[cv] == value and not ex[i].isSpecial() ]
exa = avWCVal(self.a)
exb = avWCVal(self.b)
try:
t, prob = stats.lttest_ind(exa, exb)
return prob if self.prob else t
except:
return 1.0 if self.prob else 0.0
class MA_fold_change(object):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
def __call__(self, i, data):
cv = data.domain.classVar
#print data.domain
#for faster computation. to save dragging many attributes along
dom2 = orange.Domain([data.domain[i]], data.domain.classVar)
data = orange.ExampleTable(dom2, data)
i = 0
if self.a == None: self.a = cv.values[0]
if self.b == None: self.b = cv.values[1]
def avWCVal(value):
return [ex[i].value for ex in data if ex[cv] == value and not ex[i].isSpecial() ]
exa = avWCVal(self.a)
exb = avWCVal(self.b)
try:
return mean(exa)/mean(exb)
except:
return 1
class MA_anova(object):
def __init__(self, prob=False):
self.prob = prob
def __call__(self, i, data):
cv = data.domain.classVar
#print data.domain
#for faster computation. to save dragging many attributes along
dom2 = orange.Domain([data.domain[i]], data.domain.classVar)
data = orange.ExampleTable(dom2, data)
i = 0
def avWCVal(value):
return [ex[i].value for ex in data if ex[cv] == value and not ex[i].isSpecial() ]
data = [avWCVal(val) for val in cv.values]
try:
f, prob = stats.lF_oneway(*tuple(data))
return prob if self.prob else f
except:
return 1.0 if self.prob else 0.0
import numpy as np
import numpy.ma as ma
class ExpressionSignificance_Test(object):
def __new__(cls, data, useAttributeLabels, **kwargs):
self = object.__new__(cls)
if kwargs:
self.__init__(data, useAttributeLabels)
return self.__call__(**kwargs)
else:
return self
def __init__(self, data, useAttributeLabels=False):
self.data = data
self.useAttributeLabels = useAttributeLabels
self.attr_labels, self.data_classes = self._data_info(data)
self.attributes = [attr for attr in self.data.domain.attributes if attr.varType in [orange.VarTypes.Continuous, orange.VarTypes.Discrete]]
self.classes = np.array(self.attr_labels if useAttributeLabels else self.data_classes)
self.keys = range(len(data)) if useAttributeLabels else self.attributes
self.array, _, _ = data.toNumpyMA()
if self.useAttributeLabels:
self.array = ma.transpose(self.array)
# self.dim = 1 if useAttributeLabels else 0
self.dim = 0
def _data_info(self, data):
return [set(attr.attributes.items()) for attr in data.domain.attributes], [ex.getclass() for ex in data] if data.domain.classVar else [None]*len(data)
def test_indices(self, target, classes=None):
classes = self.classes if classes is None else classes
def target_set(target):
if isinstance(target, tuple):
return set([target])
else:
assert(isinstance(target, set))
return target
if self.useAttributeLabels:
if isinstance(target, list):
ind = [[i for i, cl in enumerate(self.classes) if target_set(t).intersection(cl)] for t in target]
else:
target = target_set(target)
ind1 = [i for i, cl in enumerate(self.classes) if target.intersection(cl)]
ind2 = [i for i, cl in enumerate(self.classes) if not target.intersection(cl)]
ind = [ind1, ind2]
else:
if isinstance(target, list):
ind = [ma.nonzero(self.classes == t)[0] for t in target]
else:
if isinstance(target, (basestring, orange.Variable)):
target = set([target])
else:
assert(isinstance(target, set))
target = list(target)
ind1 = [i for i, cl in enumerate(self.classes) if cl in target]
ind2 = [i for i, cl in enumerate(self.classes) if cl not in target]
ind = [ind1, ind2]
return ind
def __call__(self, target):
raise NotImplementedError()
def null_distribution(self, num, *args, **kwargs):
kwargs = dict(kwargs)
advance = lambda: None
if "advance" in kwargs:
advance = kwargs["advance"]
del kwargs["advance"]
results = []
originalClasses = self.classes.copy()
for i in range(num):
np.random.shuffle(self.classes)
results.append(self.__call__(*args, **kwargs))
advance()
self.classes = originalClasses
return results
class ExpressionSignificance_TTest(ExpressionSignificance_Test):
def __call__(self, target):
ind1, ind2 = self.test_indices(target)
t, pval = attest_ind(self.array[ind1, :], self.array[ind2, :], dim=self.dim)
return zip(self.keys, zip(t, pval))
class ExpressionSignificance_FoldChange(ExpressionSignificance_Test):
def __call__(self, target):
ind1, ind2 = self.test_indices(target)
a1, a2 = self.array[ind1, :], self.array[ind2, :]
fold = ma.mean(a1, self.dim)/ma.mean(a2, self.dim)
return zip(self.keys, fold)
class ExpressionSignificance_SignalToNoise(ExpressionSignificance_Test):
def __call__(self, target):
ind1, ind2 = self.test_indices(target)
a1, a2 = self.array[ind1, :], self.array[ind2, :]
stn = (ma.mean(a1, self.dim) - ma.mean(a2, self.dim)) / (ma.sqrt(ma.var(a1, self.dim)) + ma.sqrt(ma.var(a2, self.dim)))
return zip(self.keys, stn)
class ExpressionSignificance_ANOVA(ExpressionSignificance_Test):
def __call__(self, target=None):
if target is not None:
indices = self.test_indices(target)
else:
indices = []
f, prob = aF_oneway(*[self.array[ind, :] for ind in indices], **dict(dim=0))
return zip(self.keys, zip(f, prob))
class ExpressionSignificance_ChiSquare(ExpressionSignificance_Test):
def __call__(self, target):
array = equi_n_discretization(self.array.copy(), intervals=5, dim=0)
ind1, ind2 = self.test_indices(target)
a1, a2 = array[ind1, :], array[ind2, :]
dist1, dist2 = [], []
dist = ma.zeros((array.shape[1], 2, 5))
for i in range(5):
dist1.append(ma.sum(ma.ones(a1.shape) * (a1 == i), 0))
dist2.append(ma.sum(ma.ones(a2.shape) * (a2 == i), 0))
dist[:, 0, i] = dist1[-1]
dist[:, 1, i] = dist2[-1]
return zip(self.keys, achisquare_indtest(np.array(dist), dim=1))
class ExpressionSignificance_Info(ExpressionSignificance_Test):
def __call__(self, target):
array = equi_n_discretization(self.array.copy(), intervals=5, dim=1)
ind1, ind2 = self.test_indices(target)
a1, a2 = array[ind1, :], array[ind2, :]
dist1, dist2 = [], []
dist = ma.zeros((array.shape[1], 2, 5))
for i in range(5):
dist1.append(ma.sum(ma.ones(a1.shape) * (a1 == i), 0))
dist2.append(ma.sum(ma.ones(a2.shape) * (a2 == i), 0))
dist[:, 0, i] = dist1[-1]
dist[:, 1, i] = dist2[-1]
classinfo = entropy(np.array([len(ind1), len(ind2)]))
E = ma.sum(entropy(dist, dim=1) * ma.sum(dist, 1), 1) / ma.sum(ma.sum(dist, 1), 1)
return zip(self.keys, classinfo - E)
class ExpressionSignificance_MannWhitneyu(ExpressionSignificance_Test):
def __call__(self, target):
ind1, ind2 = self.test_indices(target)
a, b = self.array[ind1, :], self.array[ind2, :]
# results = [amannwhitneyu(a[:, i],b[:, i]) for i in range(a.shape[1])]
results = [statc.mannwhitneyu(list(a[:, i]),list(b[:, i])) for i in range(a.shape[1])]
return zip(self.keys, results)
def attest_ind(a, b, dim=None):
""" Return the t-test statistics on arrays a and b over the dim axis.
Returns both the t statistic as well as the p-value
"""
# dim = a.ndim - 1 if dim is None else dim
x1, x2 = ma.mean(a, dim), ma.mean(b, dim)
v1, v2 = ma.var(a, dim), ma.var(b, dim)
n1, n2 = (a.shape[dim], b.shape[dim]) if dim is not None else (a.size, b.size)
df = float(n1+n2-2)
svar = ((n1-1)*v1+(n2-1)*v2) / df
t = (x1-x2)/ma.sqrt(svar*(1.0/n1 + 1.0/n2))
if t.ndim == 0:
return (t, statc.betai(0.5*df,0.5,df/(df+t**2)) if t is not ma.masked and df/(df+t**2) <= 1.0 else ma.masked)
else:
prob = [statc.betai(0.5*df,0.5,df/(df+tsq)) if tsq is not ma.masked and df/(df+tsq) <= 1.0 else ma.masked for tsq in t*t]
return t, prob
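# Hedged usage sketch (illustrative only) for the masked-array t-test above:
#
#     a = ma.array([[1., 2., 3.], [2., 3., 4.]])   # rows are samples, columns are genes
#     b = ma.array([[5., 6., 7.], [6., 7., 8.]])
#     t, prob = attest_ind(a, b, dim=0)             # one t statistic / p-value per column
#
# This mirrors how ExpressionSignificance_TTest calls it, with dim selecting the
# axis that holds the samples.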
def aF_oneway(*args, **kwargs):
dim = kwargs.get("dim", None)
arrays = args
means = [ma.mean(a, dim) for a in arrays]
vars = [ma.var(a, dim) for a in arrays]
lens = [ma.sum(ma.array(ma.ones(a.shape), mask=ma.asarray(a).mask), dim) for a in arrays]
alldata = ma.concatenate(arrays, dim if dim is not None else 0)
bign = ma.sum(ma.array(ma.ones(alldata.shape), mask=alldata.mask), dim)
sstot = ma.sum(alldata ** 2, dim) - (ma.sum(alldata, dim) ** 2) / bign
ssbn = ma.sum([(ma.sum(a, dim) ** 2) / L for a, L in zip(arrays, lens)], dim)
# print ma.sum(alldata, dim) ** 2 / bign, ssbn
ssbn -= ma.sum(alldata, dim) ** 2 / bign
sswn = sstot - ssbn
dfbn = dfnum = float(len(args) - 1.0)
dfwn = bign - len(args) # + 1.0
F = (ssbn / dfbn) / (sswn / dfwn)
if F.ndim == 0 and dfwn.ndim == 0:
return (F,statc.betai(0.5 * dfwn, 0.5 * dfnum, dfwn/float(dfwn+dfnum*F)) if F is not ma.masked and dfwn/float(dfwn+dfnum*F) <= 1.0 \
and dfwn/float(dfwn+dfnum*F) >= 0.0 else ma.masked)
else:
prob = [statc.betai(0.5 * dfden, 0.5 * dfnum, dfden/float(dfden+dfnum*f)) if f is not ma.masked and dfden/float(dfden+dfnum*f) <= 1.0 \
and dfden/float(dfden+dfnum*f) >= 0.0 else ma.masked for dfden, f in zip (dfwn, F)]
return F, prob
def achisquare_indtest(observed, dim=None):
if observed.ndim == 2:
observed = ma.array([observed])
if dim is not None:
dim += 1
if dim is None:
dim = observed.ndim - 2
rowtotal = ma.sum(observed, dim + 1)
coltotal = ma.sum(observed, dim)
total = ma.sum(rowtotal, dim)
ones = ma.array(ma.ones(observed.shape))
expected = ones * rowtotal.reshape(rowtotal.shape[:dim] + (-1, 1))
a = ones * coltotal[..., np.zeros(observed.shape[dim], dtype=int),:]
expected = expected * (a) / total.reshape((-1, 1, 1))
chisq = ma.sum(ma.sum((observed - expected) ** 2 / expected, dim + 1), dim)
return chisq
def equi_n_discretization(array, intervals=5, dim=1):
count = ma.sum(ma.array(ma.ones(array.shape, dtype=int), mask=array.mask), dim)
cut = ma.zeros(len(count), dtype=int)
sarray = ma.sort(array, dim)
r = count % intervals
pointsshape = list(array.shape)
pointsshape[dim] = 1
points = []
for i in range(intervals):
cutend = cut + count / intervals + numpy.ones(len(r)) * (r > i)
if dim == 1:
p = sarray[range(len(cutend)), numpy.array(cutend, dtype=int) -1]
else:
p = sarray[numpy.array(cutend, dtype=int) -1, range(len(cutend))]
points.append(p.reshape(pointsshape))
cut = cutend
darray = ma.array(ma.zeros(array.shape) - 1, mask=array.mask)
darray[ma.nonzero(array <= points[0])] = 0
for i in range(0, intervals):
darray[ma.nonzero((array > points[i]))] = i + 1
return darray
def entropy(array, dim=None):
if dim is None:
array = array.ravel()
dim = 0
n = ma.sum(array, dim)
array = ma.log(array) * array
sum = ma.sum(array, dim)
return (ma.log(n) - sum / n) / ma.log(2.0)
"""\
MA - Plot
=========
Functions for normalization of expression arrays and ploting
MA - Plots
Example::
## Load data from GEO
>>> data = orange.ExampleTable("GDS1210.tab")
## Split data by columns into normal and cancer subsets
>>> cancer, normal = data_split(data, [("disease state", "cancer"), ("disease state", "normal")])
## Convert to numpy MaskedArrays
>>> cancer, normal = cancer.toNumpyMA("A")[0], normal.toNumpyMA("A")[0]
## Merge by averaging
>>> cancer = merge_replicates(cancer)
>>> normal = merge_replicates(normal)
## Plot MA-plot
>>> MA_plot(cancer, normal)
"""
from Orange.orng import orngMisc
from numpy import median
def lowess(x, y, f=2./3., iter=3, progressCallback=None):
""" Lowess taken from Bio.Statistics.lowess, modified to compute pairwise
distances inplace.
lowess(x, y, f=2./3., iter=3) -> yest
Lowess smoother: Robust locally weighted regression.
The lowess function fits a nonparametric regression curve to a scatterplot.
The arrays x and y contain an equal number of elements; each pair
(x[i], y[i]) defines a data point in the scatterplot. The function returns
the estimated (smooth) values of y.
The smoothing span is given by f. A larger value for f will result in a
smoother curve. The number of robustifying iterations is given by iter. The
function will run faster with a smaller number of iterations.
x and y should be numpy float arrays of equal length. The return value is
also a numpy float array of that length.
e.g.
>>> import numpy
>>> x = numpy.array([4, 4, 7, 7, 8, 9, 10, 10, 10, 11, 11, 12, 12, 12,
... 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 16, 16,
... 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 20, 20, 20, 20,
... 20, 22, 23, 24, 24, 24, 24, 25], numpy.float)
>>> y = numpy.array([2, 10, 4, 22, 16, 10, 18, 26, 34, 17, 28, 14, 20, 24,
... 28, 26, 34, 34, 46, 26, 36, 60, 80, 20, 26, 54, 32, 40,
... 28, 26, 34, 34, 46, 26, 36, 60, 80, 20, 26, 54, 32, 40,
... 32, 40, 50, 42, 56, 76, 84, 36, 46, 68, 32, 48, 52, 56,
... 64, 66, 54, 70, 92, 93, 120, 85], numpy.float)
>>> result = lowess(x, y)
>>> len(result)
50
>>> print "[%0.2f, ..., %0.2f]" % (result[0], result[-1])
[4.85, ..., 84.98]
"""
n = len(x)
r = min(int(numpy.ceil(f*n)), n - 1)
# h = [numpy.sort(numpy.abs(x-x[i]))[r] for i in range(n)]
# h, xtmp = numpy.zeros_like(x), numpy.zeros_like(x)
# for i in range(n):
# xtmp = numpy.abs(x - x[i], xtmp)
# h[i] = numpy.sort(xtmp)[r]
# w = numpy.clip(numpy.abs(([x]-numpy.transpose([x]))/h),0.0,1.0)
dist = [x] - numpy.transpose([x])
dist = numpy.abs(dist, dist)
dist.sort(axis=1)
h = dist[:, r]
del dist
w = [x]-numpy.transpose([x])
w /= h
w = numpy.abs(w, w)
w = numpy.clip(w, 0.0, 1.0, w)
# w = 1-w*w*w
w **= 3
w *= -1
w += 1
# w = w*w*w
w **= 3
yest = numpy.zeros(n)
delta = numpy.ones(n)
milestones = orngMisc.progressBarMilestones(iter*n)
for iteration in range(iter):
for i in xrange(n):
weights = delta * w[:,i]
weights_mul_x = weights * x
b1 = numpy.ma.dot(weights,y)
b2 = numpy.ma.dot(weights_mul_x,y)
A11 = sum(weights)
A12 = sum(weights_mul_x)
A21 = A12
A22 = numpy.ma.dot(weights_mul_x,x)
determinant = A11*A22 - A12*A21
beta1 = (A22*b1-A12*b2) / determinant
beta2 = (A11*b2-A21*b1) / determinant
yest[i] = beta1 + beta2*x[i]
if progressCallback and (iteration*n + i) in milestones:
                progressCallback(100. * (iteration*n + i) / (iter * n))
residuals = y-yest
s = median(abs(residuals))
delta[:] = numpy.clip(residuals/(6*s),-1,1)
delta[:] = 1-delta*delta
delta[:] = delta*delta
return yest
def lowess2(x, y, xest, f=2./3., iter=3, progressCallback=None):
"""Returns estimated values of y in data points xest (or None if estimation fails).
Lowess smoother: Robust locally weighted regression.
The lowess function fits a nonparametric regression curve to a scatterplot.
The arrays x and y contain an equal number of elements; each pair
(x[i], y[i]) defines a data point in the scatterplot. The function returns
the estimated (smooth) values of y.
The smoothing span is given by f. A larger value for f will result in a
smoother curve. The number of robustifying iterations is given by iter. The
function will run faster with a smaller number of iterations.
Taken from <NAME>'s numpyExtn.py, modified for numpy, computes pairwise
distances inplace
"""
x = numpy.asarray(x, 'f')
y = numpy.asarray(y, 'f')
xest = numpy.asarray(xest, 'f')
n = len(x)
nest = len(xest)
r = min(int(numpy.ceil(f*n)),n-1) # radius: num. of points to take into LR
# h = [numpy.sort(numpy.abs(x-x[i]))[r] for i in range(n)] # distance of the r-th point from x[i]
dist = [x] - numpy.transpose([x])
dist = numpy.abs(dist, dist)
dist.sort(axis=1)
h = dist[:, r]
del dist # to free memory
w = [x] - numpy.transpose([x])
w /= h
w = numpy.abs(w, w)
w = numpy.clip(w, 0.0, 1.0, w)
# w = numpy.clip(numpy.abs(([x]-numpy.transpose([x]))/h),0.0,1.0)
w **= 3
w *= -1
w += 1
# w = 1 - w**3 #1-w*w*w
w **= 3
# w = w**3 #w*w*w
# hest = [numpy.sort(numpy.abs(x-xest[i]))[r] for i in range(nest)] # r-th min. distance from xest[i] to x
dist = [x] - numpy.transpose([xest])
dist = numpy.abs(dist, dist)
dist.sort(axis=1)
hest = dist[:, r]
del dist # to free memory
# west = numpy.clip(numpy.abs(([xest]-numpy.transpose([x]))/hest),0.0,1.0) # shape: (len(x), len(xest)
west = [xest]-numpy.transpose([x])
west /= hest
west = numpy.abs(west, west)
west = numpy.clip(west, 0.0, 1.0, west)
# west = 1 - west**3 #1-west*west*west
west **= 3
west *= -1
west += 1
# west = west**3 #west*west*west
west **= 3
yest = numpy.zeros(n,'f')
yest2 = numpy.zeros(nest,'f')
delta = numpy.ones(n,'f')
iter_count = iter*(nest + n) if iter > 1 else nest
milestones = orngMisc.progressBarMilestones(iter_count)
curr_iter = 0
for iteration in range(iter):
# fit xest
for i in range(nest):
weights = delta * west[:,i]
b = numpy.array([numpy.sum(weights*y), numpy.sum(weights*y*x)])
A = numpy.array([[numpy.sum(weights), numpy.sum(weights*x)], [numpy.sum(weights*x), numpy.sum(weights*x*x)]])
beta = numpy.linalg.solve(A, b)
yest2[i] = beta[0] + beta[1]*xest[i]
if progressCallback and curr_iter in milestones:
progressCallback(100. * curr_iter / iter_count)
curr_iter += 1
# fit x (to calculate residuals and delta)
if iter > 1:
for i in range(n):
weights = delta * w[:,i]
b = numpy.array([numpy.sum(weights*y), numpy.sum(weights*y*x)])
A = numpy.array([[numpy.sum(weights), numpy.sum(weights*x)], [numpy.sum(weights*x), numpy.sum(weights*x*x)]])
beta = numpy.linalg.solve(A,b)
yest[i] = beta[0] + beta[1]*x[i]
if progressCallback and curr_iter in milestones:
progressCallback(100. * curr_iter / iter_count)
curr_iter += 1
residuals = y-yest
s = numpy.median(numpy.abs(residuals))
delta = numpy.clip(residuals/(6*s), -1, 1)
delta = 1-delta*delta
delta = delta*delta
return yest2
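# Usage sketch (added; the data below are made up): smooth noisy samples and
# evaluate the fitted curve on a coarser grid of points.
# >>> x = numpy.linspace(0, 10, 200)
# >>> y = numpy.sin(x) + numpy.random.normal(0, 0.1, 200)
# >>> xest = numpy.linspace(0, 10, 20)
# >>> yest = lowess2(x, y, xest, f=2./3., iter=3)   # estimates of y at the xest points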
def attr_group_indices(data, label_groups):
    """ Return two or more lists of indices into `data.domain` based on `label_groups`
Example::
cancer_indices, no_cancer_indices = attr_group_indices(data, [("disease state", "cancer"), ("disease state", "normal")])
"""
ret = []
for key, val in label_groups:
ind = [i for i, attr in enumerate(data.domain.attributes) if attr.attributes.get(key, None) == val]
ret.append(ind)
return ret
def example_group_indices(data, attr, values):
""" Return lists of indices into `data` for each `values` item that matches
the example value at `attr` attribute
Example::
cls_ind1, cls_ind2 = example_group_indices(data, data.domain.classVar, ["Class 1", "Class 2"])
"""
ret = [[] for _ in values]
values_id = dict([(str(value), i) for i, value in enumerate(values)])
for i, ex in enumerate(data):
id = values_id.get(str(ex[attr]), None)
if id is not None:
ret[id].append(i)
return ret
def data_group_split(data, label_groups):
    """ Split a `data` example table into two or more based on
contents of iterable `label_groups` containing (key, value)
pairs matching the labels of data attributes.
Example::
cancer, no_cancer = data_group_split(data, [("disease state", "cancer"), ("disease state", "normal")])
"""
ret = []
group_indices = attr_group_indices(data, label_groups)
for indices in group_indices:
attrs = [data.domain[i] for i in indices]
domain = orange.Domain(attrs, data.domain.classVar)
domain.addmetas(data.domain.getmetas())
ret.append(orange.ExampleTable(domain, data))
return ret
def select_indices(data, key, value, axis=1):
""" Return indices into `data` (ExampleTable) along specified `axis`
where:
- if axis == 0 match data[i][key] == value
- if axis == 1 match data.domain[i].attributes[key] == value
Example::
        cancer_ind = select_indices(data, key="disease state", value="cancer", axis=1)
        normal_ind = select_indices(data, key="disease state", value=["normal"], axis=1) # value can be a list to specify more than one value
"""
values = value if isinstance(value, list) else [value]
if axis == 0:
groups = example_group_indices(data, key, values)
else:
groups = attr_group_indices(data, [(key, val) for val in values])
return sorted(reduce(set.union, groups, set()))
def select_data(data, key, value, axis=1):
    """ Return `data` (ExampleTable) subset along specified `axis`
    where:
- if axis == 0 match data[i][key] == value
- if axis == 1 match data.domain[i].attributes[key] == value
.. note:: This preserves all meta attributes of the domain
Example::
cancer = select_data(data, "disease state", "cancer", axis=1)
        normal = select_data(data, "disease state", ["normal"], axis=1) # value can be a list to specify more than one value
"""
indices = select_indices(data, key, value, axis)
if axis == 0:
examples = [data[i] for i in indices]
return orange.ExampleTable(data.domain, examples)
else:
attrs = [data.domain[i] for i in indices]
domain = orange.Domain(attrs, False)
domain.addmetas(data.domain.getmetas())
return orange.ExampleTable(domain, data)
def split_data(data, groups, axis=1):
""" Split data (ExampleTable) along specified axis, where elements of
`groups` match `key` and `value` arguments of the `select_data`
function
Example::
cancer, normal = split_data(data, [("disease state", "cancer"), ("disease state", ["normal"])], axis=1)
"""
res = []
for key, value in groups:
res.append(select_data(data, key, value, axis))
return res
def geometric_mean(array):
""" Return a geometric mean computed on a 1d masked array
"""
array = numpy.ma.asanyarray(array)
return numpy.power(reduce(lambda a,b: a*b, array.filled(1.), 1.0), 1./len(array))
def harmonic_mean(array):
    """ Return a harmonic mean computed on a 1d masked array
"""
array = numpy.ma.asanyarray(array)
return len(array) / numpy.ma.sum(1. / array)
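# Worked examples (added comments; values chosen for easy arithmetic):
# >>> geometric_mean(numpy.ma.array([2., 8.]))   # (2 * 8) ** 0.5 = 4.0
# >>> harmonic_mean(numpy.ma.array([1., 3.]))    # 2 / (1/1 + 1/3) = 1.5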
def merge_replicates(replicates, axis=0, merge_function=numpy.ma.average):
""" Merge `replicates` (numpy.array) along `axis` using `merge_function`
"""
return numpy.ma.apply_along_axis(merge_function, axis, replicates)
def ratio_intensity(G, R):
""" return the log2(R/G), log10(R*G) as a tuple
"""
log2Ratio = numpy.ma.log(R/G) / numpy.log(2)
log10Intensity = numpy.ma.log10(R*G)
return log2Ratio, log10Intensity
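# Worked example (added comment): for R = 4 and G = 2 the spot gets
# M = log2(4/2) = 1.0 and A = log10(4*2) ~= 0.90, i.e. it is drawn at
# (A, M) ~= (0.90, 1.0) on the MA-plot.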
def MA_center_average(G, R):
""" return the G, R by centering the average log2 ratio
"""
center_est = numpy.ma.average(numpy.ma.log(R/G) / numpy.log(2))
G = G * numpy.exp2(center_est)
return G, R.copy()
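# Note (added): multiplying G by 2 ** mean(log2(R/G)) shifts every log-ratio by
# -mean(M), so after centering the mean log2 ratio of the unmasked spots is zero.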
def MA_center_lowess(G, R, f=2./3., iter=1, progressCallback=None):
""" return the G, R by centering the average log2 ratio locally
depending on the intensity using lowess (locally weighted linear regression)
"""
# from Bio.Statistics.lowess import lowess
ratio, intensity = ratio_intensity(G, R)
valid = - (ratio.mask & intensity.mask)
valid_ind = numpy.ma.where(valid)
center_est = lowess(intensity[valid], ratio[valid], f=f, iter=iter, progressCallback=progressCallback)
Gc, R = G.copy(), R.copy()
Gc[valid] *= numpy.exp2(center_est)
Gc.mask, R.mask = -valid, -valid
return Gc, R
def MA_center_lowess_fast(G, R, f=2./3., iter=1, resolution=100, progressCallback=None):
"""return the G, R by centering the average log2 ratio locally
depending on the intensity using lowess (locally weighted linear regression),
approximated only in a limited resolution.
"""
ratio, intensity = ratio_intensity(G, R)
valid = - (ratio.mask & intensity.mask)
    resolution = min(resolution, len(intensity[valid]))
hist, edges = numpy.histogram(intensity[valid], len(intensity[valid])/resolution)
progressCallback2 = (lambda val: progressCallback(val/2)) if progressCallback else None
centered = lowess2(intensity[valid], ratio[valid], edges, f, iter, progressCallback=progressCallback2)
progressCallback2 = (lambda val: progressCallback(50 + val/2)) if progressCallback else None
centered = lowess2(edges, centered, intensity[valid], f, iter, progressCallback=progressCallback2)
Gc, R = G.copy(), R.copy()
Gc[valid] *= numpy.exp2(centered)
Gc.mask, R.mask = -valid, -valid
return Gc, R
def MA_plot(G, R, format="b."):
""" Plot G, R on a MA-plot using matplotlib
"""
import matplotlib.pyplot as plt
ratio, intensity = ratio_intensity(G, R)
plt.plot(intensity, ratio, format)
    plt.ylabel('M = log2(R/G)')
plt.xlabel('A = log10(R*G)')
def normalize_expression_data(data, groups, axis=1, merge_function=numpy.ma.average, center_function=MA_center_lowess_fast):
    """ A helper function that normalizes an expression array example table by centering the MA plot.
"""
if isinstance(data, orange.ExampleTable):
label_groups = [select_indices(data, key, value, axis) for key, value in groups]
array, _, _ = data.toNumpyMA()
merged = []
for indices in label_groups:
replicates = numpy.take(array, indices, axis=1)
merged.append(merge_replicates(replicates, axis=1, merge_function=merge_function))
ind1, ind2 = label_groups
G, R = merged
Gc, Rc = center_function(G, R)
domain = orange.Domain(data.domain.attributes, data.domain.classVar)
domain.addmetas(data.domain.getmetas())
data = orange.ExampleTable(domain, data)
GFactors = Gc/G
if axis == 0:
for i, gf in zip(ind1, GFactors):
for attr in range(len(data[i])):
if not data[i][attr].isSpecial():
data[i][attr] = float(data[i][attr]) * gf
else:
for ex, gf in zip(data, GFactors):
for i in ind1:
if not ex[i].isSpecial():
ex[i] = float(ex[i]) * gf
return data
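# Usage sketch (added, mirroring the module-level example above; the label
# values are dataset-specific):
# >>> data = orange.ExampleTable("GDS1210.tab")
# >>> data = normalize_expression_data(data,
# ...     [("disease state", "cancer"), ("disease state", "normal")])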
def MA_zscore(G, R, window=1./5., padded=False, progressCallback=None):
""" Return the Z-score of log2 fold ratio estimated from local
distribution of log2 fold ratio values on the MA-plot
"""
ratio, intensity = ratio_intensity(G, R)
z_scores = numpy.ma.zeros(G.shape)
sorted = list(numpy.ma.argsort(intensity))
import math, random
r = int(math.ceil(len(sorted)*window)) # number of window elements
    def local_indices(i, sorted):
        """ local indices in sorted (padded with shuffled boundary indices when the window falls out of range)
"""
start, end = i - r/2, i + r/2 + r%2
pad_start , pad_end = [], []
if start < 0:
pad_start = sorted[:abs(start)]
random.shuffle(pad_start)
start = 0
if end > len(sorted):
pad_end = sorted[end - len(sorted):]
random.shuffle(pad_end)
end = len(sorted)
if padded:
return pad_start + sorted[start: end] + pad_end
else:
return sorted[start:end]
milestones = orngMisc.progressBarMilestones(len(sorted))
for i in range(len(sorted)):
indices = local_indices(i, sorted)
localRatio = numpy.take(ratio, indices)
local_std = numpy.ma.std(localRatio)
ind = sorted[i]
z_scores[ind] = ratio[ind] / local_std
if progressCallback and i in milestones:
progressCallback(100. * i / len(sorted))
z_scores._mask = - numpy.isfinite(z_scores)
return z_scores
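# Usage sketch (added): flag differentially expressed spots as those whose local
# z-score exceeds a chosen threshold (the 1.96 cut-off below is illustrative).
# >>> z = MA_zscore(G, R, window=1./5.)
# >>> candidates = numpy.ma.where(abs(z) > 1.96)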
|
[
"numpy.ma.sum",
"numpy.abs",
"numpy.sum",
"orange.ExampleTable",
"statc.mean",
"numpy.ma.where",
"random.shuffle",
"numpy.ones",
"numpy.clip",
"numpy.ma.log",
"numpy.ma.mean",
"numpy.ma.transpose",
"numpy.linalg.solve",
"numpy.ma.asarray",
"Orange.orng.orngMisc.progressBarMilestones",
"numpy.transpose",
"numpy.isfinite",
"numpy.ma.asanyarray",
"statc.betai",
"numpy.random.shuffle",
"numpy.ceil",
"numpy.corrcoef",
"numpy.asarray",
"numpy.ma.var",
"numpy.ma.argsort",
"numpy.ma.concatenate",
"matplotlib.pyplot.ylabel",
"numpy.ma.zeros",
"numpy.ma.sqrt",
"statc.std",
"numpy.ma.dot",
"orange.Domain",
"numpy.ma.apply_along_axis",
"numpy.exp2",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.ma.log10",
"numpy.zeros",
"numpy.ma.nonzero",
"numpy.ma.array",
"numpy.ma.std",
"numpy.array",
"numpy.ma.ones",
"numpy.ma.sort",
"numpy.take",
"matplotlib.pyplot.xlabel"
] |
[((12178, 12231), 'numpy.ma.concatenate', 'ma.concatenate', (['arrays', '(dim if dim is not None else 0)'], {}), '(arrays, dim if dim is not None else 0)\n', (12192, 12231), True, 'import numpy.ma as ma\n'), ((13455, 13480), 'numpy.ma.sum', 'ma.sum', (['observed', '(dim + 1)'], {}), '(observed, dim + 1)\n', (13461, 13480), True, 'import numpy.ma as ma\n'), ((13496, 13517), 'numpy.ma.sum', 'ma.sum', (['observed', 'dim'], {}), '(observed, dim)\n', (13502, 13517), True, 'import numpy.ma as ma\n'), ((13530, 13551), 'numpy.ma.sum', 'ma.sum', (['rowtotal', 'dim'], {}), '(rowtotal, dim)\n', (13536, 13551), True, 'import numpy.ma as ma\n'), ((14094, 14113), 'numpy.ma.sort', 'ma.sort', (['array', 'dim'], {}), '(array, dim)\n', (14101, 14113), True, 'import numpy.ma as ma\n'), ((14905, 14923), 'numpy.ma.sum', 'ma.sum', (['array', 'dim'], {}), '(array, dim)\n', (14911, 14923), True, 'import numpy.ma as ma\n'), ((14968, 14986), 'numpy.ma.sum', 'ma.sum', (['array', 'dim'], {}), '(array, dim)\n', (14974, 14986), True, 'import numpy.ma as ma\n'), ((17889, 17910), 'numpy.abs', 'numpy.abs', (['dist', 'dist'], {}), '(dist, dist)\n', (17898, 17910), False, 'import numpy\n'), ((18018, 18033), 'numpy.abs', 'numpy.abs', (['w', 'w'], {}), '(w, w)\n', (18027, 18033), False, 'import numpy\n'), ((18042, 18068), 'numpy.clip', 'numpy.clip', (['w', '(0.0)', '(1.0)', 'w'], {}), '(w, 0.0, 1.0, w)\n', (18052, 18068), False, 'import numpy\n'), ((18159, 18173), 'numpy.zeros', 'numpy.zeros', (['n'], {}), '(n)\n', (18170, 18173), False, 'import numpy\n'), ((18186, 18199), 'numpy.ones', 'numpy.ones', (['n'], {}), '(n)\n', (18196, 18199), False, 'import numpy\n'), ((18217, 18257), 'Orange.orng.orngMisc.progressBarMilestones', 'orngMisc.progressBarMilestones', (['(iter * n)'], {}), '(iter * n)\n', (18247, 18257), False, 'from Orange.orng import orngMisc\n'), ((19966, 19987), 'numpy.asarray', 'numpy.asarray', (['x', '"""f"""'], {}), "(x, 'f')\n", (19979, 19987), False, 'import numpy\n'), ((19996, 20017), 'numpy.asarray', 'numpy.asarray', (['y', '"""f"""'], {}), "(y, 'f')\n", (20009, 20017), False, 'import numpy\n'), ((20029, 20053), 'numpy.asarray', 'numpy.asarray', (['xest', '"""f"""'], {}), "(xest, 'f')\n", (20042, 20053), False, 'import numpy\n'), ((20322, 20343), 'numpy.abs', 'numpy.abs', (['dist', 'dist'], {}), '(dist, dist)\n', (20331, 20343), False, 'import numpy\n'), ((20469, 20484), 'numpy.abs', 'numpy.abs', (['w', 'w'], {}), '(w, w)\n', (20478, 20484), False, 'import numpy\n'), ((20493, 20519), 'numpy.clip', 'numpy.clip', (['w', '(0.0)', '(1.0)', 'w'], {}), '(w, 0.0, 1.0, w)\n', (20503, 20519), False, 'import numpy\n'), ((20849, 20870), 'numpy.abs', 'numpy.abs', (['dist', 'dist'], {}), '(dist, dist)\n', (20858, 20870), False, 'import numpy\n'), ((21119, 21140), 'numpy.abs', 'numpy.abs', (['west', 'west'], {}), '(west, west)\n', (21128, 21140), False, 'import numpy\n'), ((21152, 21184), 'numpy.clip', 'numpy.clip', (['west', '(0.0)', '(1.0)', 'west'], {}), '(west, 0.0, 1.0, west)\n', (21162, 21184), False, 'import numpy\n'), ((21333, 21352), 'numpy.zeros', 'numpy.zeros', (['n', '"""f"""'], {}), "(n, 'f')\n", (21344, 21352), False, 'import numpy\n'), ((21364, 21386), 'numpy.zeros', 'numpy.zeros', (['nest', '"""f"""'], {}), "(nest, 'f')\n", (21375, 21386), False, 'import numpy\n'), ((21398, 21416), 'numpy.ones', 'numpy.ones', (['n', '"""f"""'], {}), "(n, 'f')\n", (21408, 21416), False, 'import numpy\n'), ((21488, 21530), 'Orange.orng.orngMisc.progressBarMilestones', 'orngMisc.progressBarMilestones', (['iter_count'], {}), 
'(iter_count)\n', (21518, 21530), False, 'from Orange.orng import orngMisc\n'), ((27008, 27034), 'numpy.ma.asanyarray', 'numpy.ma.asanyarray', (['array'], {}), '(array)\n', (27027, 27034), False, 'import numpy\n'), ((27231, 27257), 'numpy.ma.asanyarray', 'numpy.ma.asanyarray', (['array'], {}), '(array)\n', (27250, 27257), False, 'import numpy\n'), ((27480, 27539), 'numpy.ma.apply_along_axis', 'numpy.ma.apply_along_axis', (['merge_function', 'axis', 'replicates'], {}), '(merge_function, axis, replicates)\n', (27505, 27539), False, 'import numpy\n'), ((27699, 27720), 'numpy.ma.log10', 'numpy.ma.log10', (['(R * G)'], {}), '(R * G)\n', (27713, 27720), False, 'import numpy\n'), ((28359, 28380), 'numpy.ma.where', 'numpy.ma.where', (['valid'], {}), '(valid)\n', (28373, 28380), False, 'import numpy\n'), ((28536, 28558), 'numpy.exp2', 'numpy.exp2', (['center_est'], {}), '(center_est)\n', (28546, 28558), False, 'import numpy\n'), ((29603, 29623), 'numpy.exp2', 'numpy.exp2', (['centered'], {}), '(centered)\n', (29613, 29623), False, 'import numpy\n'), ((29853, 29887), 'matplotlib.pyplot.plot', 'plt.plot', (['intensity', 'ratio', 'format'], {}), '(intensity, ratio, format)\n', (29861, 29887), True, 'import matplotlib.pyplot as plt\n'), ((29892, 29918), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""M = log2(R/G"""'], {}), "('M = log2(R/G')\n", (29902, 29918), True, 'import matplotlib.pyplot as plt\n'), ((29923, 29951), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""A = log10(R*G)"""'], {}), "('A = log10(R*G)')\n", (29933, 29951), True, 'import matplotlib.pyplot as plt\n'), ((30677, 30736), 'orange.Domain', 'orange.Domain', (['data.domain.attributes', 'data.domain.classVar'], {}), '(data.domain.attributes, data.domain.classVar)\n', (30690, 30736), False, 'import orange, statc\n'), ((30792, 30825), 'orange.ExampleTable', 'orange.ExampleTable', (['domain', 'data'], {}), '(domain, data)\n', (30811, 30825), False, 'import orange, statc\n'), ((31540, 31563), 'numpy.ma.zeros', 'numpy.ma.zeros', (['G.shape'], {}), '(G.shape)\n', (31554, 31563), False, 'import numpy\n'), ((338, 402), 'orange.Domain', 'orange.Domain', (['[data.domain.attributes[i]]', 'data.domain.classVar'], {}), '([data.domain.attributes[i]], data.domain.classVar)\n', (351, 402), False, 'import orange, statc\n'), ((419, 450), 'orange.ExampleTable', 'orange.ExampleTable', (['dom2', 'data'], {}), '(dom2, data)\n', (438, 450), False, 'import orange, statc\n'), ((2513, 2566), 'orange.Domain', 'orange.Domain', (['[data.domain[i]]', 'data.domain.classVar'], {}), '([data.domain[i]], data.domain.classVar)\n', (2526, 2566), False, 'import orange, statc\n'), ((2582, 2613), 'orange.ExampleTable', 'orange.ExampleTable', (['dom2', 'data'], {}), '(dom2, data)\n', (2601, 2613), False, 'import orange, statc\n'), ((3370, 3423), 'orange.Domain', 'orange.Domain', (['[data.domain[i]]', 'data.domain.classVar'], {}), '([data.domain[i]], data.domain.classVar)\n', (3383, 3423), False, 'import orange, statc\n'), ((3439, 3470), 'orange.ExampleTable', 'orange.ExampleTable', (['dom2', 'data'], {}), '(dom2, data)\n', (3458, 3470), False, 'import orange, statc\n'), ((4126, 4179), 'orange.Domain', 'orange.Domain', (['[data.domain[i]]', 'data.domain.classVar'], {}), '([data.domain[i]], data.domain.classVar)\n', (4139, 4179), False, 'import orange, statc\n'), ((4195, 4226), 'orange.ExampleTable', 'orange.ExampleTable', (['dom2', 'data'], {}), '(dom2, data)\n', (4214, 4226), False, 'import orange, statc\n'), ((5295, 5366), 'numpy.array', 'np.array', (['(self.attr_labels if 
useAttributeLabels else self.data_classes)'], {}), '(self.attr_labels if useAttributeLabels else self.data_classes)\n', (5303, 5366), True, 'import numpy as np\n'), ((9516, 9548), 'numpy.ma.zeros', 'ma.zeros', (['(array.shape[1], 2, 5)'], {}), '((array.shape[1], 2, 5))\n', (9524, 9548), True, 'import numpy.ma as ma\n'), ((10191, 10223), 'numpy.ma.zeros', 'ma.zeros', (['(array.shape[1], 2, 5)'], {}), '((array.shape[1], 2, 5))\n', (10199, 10223), True, 'import numpy.ma as ma\n'), ((11323, 11338), 'numpy.ma.mean', 'ma.mean', (['a', 'dim'], {}), '(a, dim)\n', (11330, 11338), True, 'import numpy.ma as ma\n'), ((11340, 11355), 'numpy.ma.mean', 'ma.mean', (['b', 'dim'], {}), '(b, dim)\n', (11347, 11355), True, 'import numpy.ma as ma\n'), ((11369, 11383), 'numpy.ma.var', 'ma.var', (['a', 'dim'], {}), '(a, dim)\n', (11375, 11383), True, 'import numpy.ma as ma\n'), ((11385, 11399), 'numpy.ma.var', 'ma.var', (['b', 'dim'], {}), '(b, dim)\n', (11391, 11399), True, 'import numpy.ma as ma\n'), ((11561, 11598), 'numpy.ma.sqrt', 'ma.sqrt', (['(svar * (1.0 / n1 + 1.0 / n2))'], {}), '(svar * (1.0 / n1 + 1.0 / n2))\n', (11568, 11598), True, 'import numpy.ma as ma\n'), ((11993, 12008), 'numpy.ma.mean', 'ma.mean', (['a', 'dim'], {}), '(a, dim)\n', (12000, 12008), True, 'import numpy.ma as ma\n'), ((12038, 12052), 'numpy.ma.var', 'ma.var', (['a', 'dim'], {}), '(a, dim)\n', (12044, 12052), True, 'import numpy.ma as ma\n'), ((12321, 12346), 'numpy.ma.sum', 'ma.sum', (['(alldata ** 2)', 'dim'], {}), '(alldata ** 2, dim)\n', (12327, 12346), True, 'import numpy.ma as ma\n'), ((13317, 13337), 'numpy.ma.array', 'ma.array', (['[observed]'], {}), '([observed])\n', (13325, 13337), True, 'import numpy.ma as ma\n'), ((13572, 13595), 'numpy.ma.ones', 'ma.ones', (['observed.shape'], {}), '(observed.shape)\n', (13579, 13595), True, 'import numpy.ma as ma\n'), ((13818, 13872), 'numpy.ma.sum', 'ma.sum', (['((observed - expected) ** 2 / expected)', '(dim + 1)'], {}), '((observed - expected) ** 2 / expected, dim + 1)\n', (13824, 13872), True, 'import numpy.ma as ma\n'), ((14655, 14685), 'numpy.ma.nonzero', 'ma.nonzero', (['(array <= points[0])'], {}), '(array <= points[0])\n', (14665, 14685), True, 'import numpy.ma as ma\n'), ((14936, 14949), 'numpy.ma.log', 'ma.log', (['array'], {}), '(array)\n', (14942, 14949), True, 'import numpy.ma as ma\n'), ((15022, 15033), 'numpy.ma.log', 'ma.log', (['(2.0)'], {}), '(2.0)\n', (15028, 15033), True, 'import numpy.ma as ma\n'), ((17857, 17877), 'numpy.transpose', 'numpy.transpose', (['[x]'], {}), '([x])\n', (17872, 17877), False, 'import numpy\n'), ((17978, 17998), 'numpy.transpose', 'numpy.transpose', (['[x]'], {}), '([x])\n', (17993, 17998), False, 'import numpy\n'), ((19029, 19067), 'numpy.clip', 'numpy.clip', (['(residuals / (6 * s))', '(-1)', '(1)'], {}), '(residuals / (6 * s), -1, 1)\n', (19039, 19067), False, 'import numpy\n'), ((20290, 20310), 'numpy.transpose', 'numpy.transpose', (['[x]'], {}), '([x])\n', (20305, 20310), False, 'import numpy\n'), ((20429, 20449), 'numpy.transpose', 'numpy.transpose', (['[x]'], {}), '([x])\n', (20444, 20449), False, 'import numpy\n'), ((20814, 20837), 'numpy.transpose', 'numpy.transpose', (['[xest]'], {}), '([xest])\n', (20829, 20837), False, 'import numpy\n'), ((21070, 21090), 'numpy.transpose', 'numpy.transpose', (['[x]'], {}), '([x])\n', (21085, 21090), False, 'import numpy\n'), ((24548, 24590), 'orange.Domain', 'orange.Domain', (['attrs', 'data.domain.classVar'], {}), '(attrs, data.domain.classVar)\n', (24561, 24590), False, 'import orange, 
statc\n'), ((26190, 26232), 'orange.ExampleTable', 'orange.ExampleTable', (['data.domain', 'examples'], {}), '(data.domain, examples)\n', (26209, 26232), False, 'import orange, statc\n'), ((26310, 26337), 'orange.Domain', 'orange.Domain', (['attrs', '(False)'], {}), '(attrs, False)\n', (26323, 26337), False, 'import orange, statc\n'), ((26401, 26434), 'orange.ExampleTable', 'orange.ExampleTable', (['domain', 'data'], {}), '(domain, data)\n', (26420, 26434), False, 'import orange, statc\n'), ((27282, 27307), 'numpy.ma.sum', 'numpy.ma.sum', (['(1.0 / array)'], {}), '(1.0 / array)\n', (27294, 27307), False, 'import numpy\n'), ((27645, 27664), 'numpy.ma.log', 'numpy.ma.log', (['(R / G)'], {}), '(R / G)\n', (27657, 27664), False, 'import numpy\n'), ((27665, 27677), 'numpy.log', 'numpy.log', (['(2)'], {}), '(2)\n', (27674, 27677), False, 'import numpy\n'), ((27935, 27957), 'numpy.exp2', 'numpy.exp2', (['center_est'], {}), '(center_est)\n', (27945, 27957), False, 'import numpy\n'), ((30441, 30475), 'numpy.take', 'numpy.take', (['array', 'indices'], {'axis': '(1)'}), '(array, indices, axis=1)\n', (30451, 30475), False, 'import numpy\n'), ((31582, 31609), 'numpy.ma.argsort', 'numpy.ma.argsort', (['intensity'], {}), '(intensity)\n', (31598, 31609), False, 'import numpy\n'), ((32475, 32501), 'numpy.take', 'numpy.take', (['ratio', 'indices'], {}), '(ratio, indices)\n', (32485, 32501), False, 'import numpy\n'), ((32522, 32546), 'numpy.ma.std', 'numpy.ma.std', (['localRatio'], {}), '(localRatio)\n', (32534, 32546), False, 'import numpy\n'), ((32752, 32776), 'numpy.isfinite', 'numpy.isfinite', (['z_scores'], {}), '(z_scores)\n', (32766, 32776), False, 'import numpy\n'), ((501, 527), 'numpy.corrcoef', 'numpy.corrcoef', (['c', 'a[:, 0]'], {}), '(c, a[:, 0])\n', (515, 527), False, 'import numpy\n'), ((1501, 1513), 'statc.std', 'statc.std', (['l'], {}), '(l)\n', (1510, 1513), False, 'import orange, statc\n'), ((1555, 1568), 'statc.mean', 'statc.mean', (['l'], {}), '(l)\n', (1565, 1568), False, 'import orange, statc\n'), ((5552, 5576), 'numpy.ma.transpose', 'ma.transpose', (['self.array'], {}), '(self.array)\n', (5564, 5576), True, 'import numpy.ma as ma\n'), ((7691, 7722), 'numpy.random.shuffle', 'np.random.shuffle', (['self.classes'], {}), '(self.classes)\n', (7708, 7722), True, 'import numpy as np\n'), ((8376, 8397), 'numpy.ma.mean', 'ma.mean', (['a1', 'self.dim'], {}), '(a1, self.dim)\n', (8383, 8397), True, 'import numpy.ma as ma\n'), ((8398, 8419), 'numpy.ma.mean', 'ma.mean', (['a2', 'self.dim'], {}), '(a2, self.dim)\n', (8405, 8419), True, 'import numpy.ma as ma\n'), ((12260, 12282), 'numpy.ma.ones', 'ma.ones', (['alldata.shape'], {}), '(alldata.shape)\n', (12267, 12282), True, 'import numpy.ma as ma\n'), ((12528, 12548), 'numpy.ma.sum', 'ma.sum', (['alldata', 'dim'], {}), '(alldata, dim)\n', (12534, 12548), True, 'import numpy.ma as ma\n'), ((13983, 14014), 'numpy.ma.ones', 'ma.ones', (['array.shape'], {'dtype': 'int'}), '(array.shape, dtype=int)\n', (13990, 14014), True, 'import numpy.ma as ma\n'), ((14600, 14621), 'numpy.ma.zeros', 'ma.zeros', (['array.shape'], {}), '(array.shape)\n', (14608, 14621), True, 'import numpy.ma as ma\n'), ((14740, 14769), 'numpy.ma.nonzero', 'ma.nonzero', (['(array > points[i])'], {}), '(array > points[i])\n', (14750, 14769), True, 'import numpy.ma as ma\n'), ((14999, 15008), 'numpy.ma.log', 'ma.log', (['n'], {}), '(n)\n', (15005, 15008), True, 'import numpy.ma as ma\n'), ((17521, 17538), 'numpy.ceil', 'numpy.ceil', (['(f * n)'], {}), '(f * n)\n', (17531, 17538), False, 
'import numpy\n'), ((18412, 18436), 'numpy.ma.dot', 'numpy.ma.dot', (['weights', 'y'], {}), '(weights, y)\n', (18424, 18436), False, 'import numpy\n'), ((18453, 18483), 'numpy.ma.dot', 'numpy.ma.dot', (['weights_mul_x', 'y'], {}), '(weights_mul_x, y)\n', (18465, 18483), False, 'import numpy\n'), ((18591, 18621), 'numpy.ma.dot', 'numpy.ma.dot', (['weights_mul_x', 'x'], {}), '(weights_mul_x, x)\n', (18603, 18621), False, 'import numpy\n'), ((20106, 20123), 'numpy.ceil', 'numpy.ceil', (['(f * n)'], {}), '(f * n)\n', (20116, 20123), False, 'import numpy\n'), ((21889, 21913), 'numpy.linalg.solve', 'numpy.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (21907, 21913), False, 'import numpy\n'), ((22844, 22882), 'numpy.clip', 'numpy.clip', (['(residuals / (6 * s))', '(-1)', '(1)'], {}), '(residuals / (6 * s), -1, 1)\n', (22854, 22882), False, 'import numpy\n'), ((24658, 24691), 'orange.ExampleTable', 'orange.ExampleTable', (['domain', 'data'], {}), '(domain, data)\n', (24677, 24691), False, 'import orange, statc\n'), ((27889, 27908), 'numpy.ma.log', 'numpy.ma.log', (['(R / G)'], {}), '(R / G)\n', (27901, 27908), False, 'import numpy\n'), ((27909, 27921), 'numpy.log', 'numpy.log', (['(2)'], {}), '(2)\n', (27918, 27921), False, 'import numpy\n'), ((31980, 32005), 'random.shuffle', 'random.shuffle', (['pad_start'], {}), '(pad_start)\n', (31994, 32005), False, 'import math, random\n'), ((32119, 32142), 'random.shuffle', 'random.shuffle', (['pad_end'], {}), '(pad_end)\n', (32133, 32142), False, 'import math, random\n'), ((8686, 8707), 'numpy.ma.mean', 'ma.mean', (['a1', 'self.dim'], {}), '(a1, self.dim)\n', (8693, 8707), True, 'import numpy.ma as ma\n'), ((8710, 8731), 'numpy.ma.mean', 'ma.mean', (['a2', 'self.dim'], {}), '(a2, self.dim)\n', (8717, 8731), True, 'import numpy.ma as ma\n'), ((9836, 9850), 'numpy.array', 'np.array', (['dist'], {}), '(dist)\n', (9844, 9850), True, 'import numpy as np\n'), ((10594, 10609), 'numpy.ma.sum', 'ma.sum', (['dist', '(1)'], {}), '(dist, 1)\n', (10600, 10609), True, 'import numpy.ma as ma\n'), ((11632, 11678), 'statc.betai', 'statc.betai', (['(0.5 * df)', '(0.5)', '(df / (df + t ** 2))'], {}), '(0.5 * df, 0.5, df / (df + t ** 2))\n', (11643, 11678), False, 'import orange, statc\n'), ((11757, 11800), 'statc.betai', 'statc.betai', (['(0.5 * df)', '(0.5)', '(df / (df + tsq))'], {}), '(0.5 * df, 0.5, df / (df + tsq))\n', (11768, 11800), False, 'import orange, statc\n'), ((12098, 12114), 'numpy.ma.ones', 'ma.ones', (['a.shape'], {}), '(a.shape)\n', (12105, 12114), True, 'import numpy.ma as ma\n'), ((12350, 12370), 'numpy.ma.sum', 'ma.sum', (['alldata', 'dim'], {}), '(alldata, dim)\n', (12356, 12370), True, 'import numpy.ma as ma\n'), ((13697, 13737), 'numpy.zeros', 'np.zeros', (['observed.shape[dim]'], {'dtype': 'int'}), '(observed.shape[dim], dtype=int)\n', (13705, 13737), True, 'import numpy as np\n'), ((22505, 22529), 'numpy.linalg.solve', 'numpy.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (22523, 22529), False, 'import numpy\n'), ((22802, 22822), 'numpy.abs', 'numpy.abs', (['residuals'], {}), '(residuals)\n', (22811, 22822), False, 'import numpy\n'), ((8744, 8764), 'numpy.ma.var', 'ma.var', (['a1', 'self.dim'], {}), '(a1, self.dim)\n', (8750, 8764), True, 'import numpy.ma as ma\n'), ((8776, 8796), 'numpy.ma.var', 'ma.var', (['a2', 'self.dim'], {}), '(a2, self.dim)\n', (8782, 8796), True, 'import numpy.ma as ma\n'), ((10565, 10580), 'numpy.ma.sum', 'ma.sum', (['dist', '(1)'], {}), '(dist, 1)\n', (10571, 10580), True, 'import numpy.ma as ma\n'), ((12404, 12418), 
'numpy.ma.sum', 'ma.sum', (['a', 'dim'], {}), '(a, dim)\n', (12410, 12418), True, 'import numpy.ma as ma\n'), ((21701, 21723), 'numpy.sum', 'numpy.sum', (['(weights * y)'], {}), '(weights * y)\n', (21710, 21723), False, 'import numpy\n'), ((21723, 21749), 'numpy.sum', 'numpy.sum', (['(weights * y * x)'], {}), '(weights * y * x)\n', (21732, 21749), False, 'import numpy\n'), ((6772, 6801), 'numpy.ma.nonzero', 'ma.nonzero', (['(self.classes == t)'], {}), '(self.classes == t)\n', (6782, 6801), True, 'import numpy.ma as ma\n'), ((9608, 9625), 'numpy.ma.ones', 'ma.ones', (['a1.shape'], {}), '(a1.shape)\n', (9615, 9625), True, 'import numpy.ma as ma\n'), ((9675, 9692), 'numpy.ma.ones', 'ma.ones', (['a2.shape'], {}), '(a2.shape)\n', (9682, 9692), True, 'import numpy.ma as ma\n'), ((10283, 10300), 'numpy.ma.ones', 'ma.ones', (['a1.shape'], {}), '(a1.shape)\n', (10290, 10300), True, 'import numpy.ma as ma\n'), ((10350, 10367), 'numpy.ma.ones', 'ma.ones', (['a2.shape'], {}), '(a2.shape)\n', (10357, 10367), True, 'import numpy.ma as ma\n'), ((12121, 12134), 'numpy.ma.asarray', 'ma.asarray', (['a'], {}), '(a)\n', (12131, 12134), True, 'import numpy.ma as ma\n'), ((14384, 14414), 'numpy.array', 'numpy.array', (['cutend'], {'dtype': 'int'}), '(cutend, dtype=int)\n', (14395, 14414), False, 'import numpy\n'), ((14456, 14486), 'numpy.array', 'numpy.array', (['cutend'], {'dtype': 'int'}), '(cutend, dtype=int)\n', (14467, 14486), False, 'import numpy\n'), ((21778, 21796), 'numpy.sum', 'numpy.sum', (['weights'], {}), '(weights)\n', (21787, 21796), False, 'import numpy\n'), ((21798, 21820), 'numpy.sum', 'numpy.sum', (['(weights * x)'], {}), '(weights * x)\n', (21807, 21820), False, 'import numpy\n'), ((21822, 21844), 'numpy.sum', 'numpy.sum', (['(weights * x)'], {}), '(weights * x)\n', (21831, 21844), False, 'import numpy\n'), ((21844, 21870), 'numpy.sum', 'numpy.sum', (['(weights * x * x)'], {}), '(weights * x * x)\n', (21853, 21870), False, 'import numpy\n'), ((22309, 22331), 'numpy.sum', 'numpy.sum', (['(weights * y)'], {}), '(weights * y)\n', (22318, 22331), False, 'import numpy\n'), ((22331, 22357), 'numpy.sum', 'numpy.sum', (['(weights * y * x)'], {}), '(weights * y * x)\n', (22340, 22357), False, 'import numpy\n'), ((22390, 22408), 'numpy.sum', 'numpy.sum', (['weights'], {}), '(weights)\n', (22399, 22408), False, 'import numpy\n'), ((22410, 22432), 'numpy.sum', 'numpy.sum', (['(weights * x)'], {}), '(weights * x)\n', (22419, 22432), False, 'import numpy\n'), ((22434, 22456), 'numpy.sum', 'numpy.sum', (['(weights * x)'], {}), '(weights * x)\n', (22443, 22456), False, 'import numpy\n'), ((22456, 22482), 'numpy.sum', 'numpy.sum', (['(weights * x * x)'], {}), '(weights * x * x)\n', (22465, 22482), False, 'import numpy\n')]
|
from __future__ import print_function
from PIL import Image
import numpy as np
import os
import cv2
import torch
import torch.nn.functional as F
import torchvision
import torchvision.transforms.functional as TF
import math
import pickle
class ImageTransformer(object):
"""
Rescale the image in a sample to a given size.
"""
def __init__(self, output_size):
"""
Args:
            output_size (int): Desired output size. Every image is resized to
                (output_size, output_size); the aspect ratio is not preserved.
"""
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, sample):
images = sample['images']
resized_images = []
for image in images:
image = cv2.resize(image, (self.output_size, self.output_size))
image = image.astype(np.float32)
image /= 255.0
image = image * 2 - 1
image = np.transpose(image, (2, 0, 1))
resized_images.append(image)
resized_images = np.stack(resized_images, axis=0)
sample['images'] = resized_images
return sample
class ImageNormalizeToTensor(object):
    """
    Convert a PIL image (or ndarray) to a tensor and normalize it to the [-1, 1] range.
"""
def __call__(self, image):
# image = F.to_tensor(image)
image = TF.to_tensor(image)
image.mul_(2.0)
image.sub_(1.0)
return image
class ToTensor(object):
"""
Convert ndarrays in sample to Tensors.
"""
def __call__(self, sample):
sample['images'] = torch.Tensor(sample['images']).float()
sample['smpls'] = torch.Tensor(sample['smpls']).float()
return sample
class ToTensorDensePose(object):
"""
Convert ndarrays in sample to Tensors.
"""
def __call__(self, sample):
sample['images'] = torch.Tensor(sample['images']).float()
sample['smpl'] = torch.Tensor(sample['smpl']).float()
sample['uvs'] = torch.Tensor(sample['uvs']).float()
sample['mask'] = torch.Tensor(sample['mask']).int()
return sample
def morph(src_bg_mask, ks, mode='erode', kernel=None):
n_ks = ks ** 2
pad_s = ks // 2
if kernel is None:
kernel = torch.ones(1, 1, ks, ks, dtype=torch.float32, device=src_bg_mask.device)
if mode == 'erode':
src_bg_mask_pad = F.pad(src_bg_mask, [pad_s, pad_s, pad_s, pad_s], value=1.0)
out = F.conv2d(src_bg_mask_pad, kernel)
out = (out == n_ks).float()
else:
src_bg_mask_pad = F.pad(src_bg_mask, [pad_s, pad_s, pad_s, pad_s], value=0.0)
out = F.conv2d(src_bg_mask_pad, kernel)
out = (out >= 1).float()
return out
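# Usage sketch (added; the mask shape is illustrative): erode a soft binary mask
# by one 3x3 step and then dilate it back.
# >>> mask = torch.ones(1, 1, 64, 64)               # (N, 1, H, W) float mask in {0, 1}
# >>> eroded = morph(mask, ks=3, mode='erode')
# >>> dilated = morph(eroded, ks=3, mode='dilate')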
def cal_mask_bbox(head_mask, factor=1.3):
"""
Args:
head_mask (np.ndarray): (N, 1, 256, 256).
factor (float): the factor to enlarge the bbox of head.
Returns:
        bbox (np.ndarray.int32): (N, 4), here, 4 = (left_top_x, right_top_x, left_top_y, right_top_y)
"""
bs, _, height, width = head_mask.shape
bbox = np.zeros((bs, 4), dtype=np.int32)
valid = np.ones((bs,), dtype=np.float32)
for i in range(bs):
mask = head_mask[i, 0]
ys, xs = np.where(mask == 1)
if len(ys) == 0:
valid[i] = 0.0
bbox[i, 0] = 0
bbox[i, 1] = width
bbox[i, 2] = 0
bbox[i, 3] = height
continue
lt_y = np.min(ys) # left top of Y
lt_x = np.min(xs) # left top of X
rt_y = np.max(ys) # right top of Y
rt_x = np.max(xs) # right top of X
h = rt_y - lt_y # height of head
w = rt_x - lt_x # width of head
cy = (lt_y + rt_y) // 2 # (center of y)
cx = (lt_x + rt_x) // 2 # (center of x)
_h = h * factor
_w = w * factor
_lt_y = max(0, int(cy - _h / 2))
_lt_x = max(0, int(cx - _w / 2))
_rt_y = min(height, int(cy + _h / 2))
_rt_x = min(width, int(cx + _w / 2))
if (_lt_x == _rt_x) or (_lt_y == _rt_y):
valid[i] = 0.0
bbox[i, 0] = 0
bbox[i, 1] = width
bbox[i, 2] = 0
bbox[i, 3] = height
else:
bbox[i, 0] = _lt_x
bbox[i, 1] = _rt_x
bbox[i, 2] = _lt_y
bbox[i, 3] = _rt_y
return bbox, valid
def to_tensor(tensor):
if isinstance(tensor, np.ndarray):
tensor = torch.FloatTensor(tensor)
return tensor
def plot_fim_enc(fim_enc, map_name):
# import matplotlib.pyplot as plt
import utils.mesh as mesh
if not isinstance(fim_enc, np.ndarray):
fim_enc = fim_enc.cpu().numpy()
if fim_enc.ndim != 4:
fim_enc = fim_enc[np.newaxis, ...]
fim_enc = np.transpose(fim_enc, axes=(0, 2, 3, 1))
imgs = []
for fim_i in fim_enc:
img = mesh.cvt_fim_enc(fim_i, map_name)
imgs.append(img)
return np.stack(imgs, axis=0)
def tensor2im(img, imtype=np.uint8, unnormalize=True, idx=0, nrows=None):
# select a sample or create grid if img is a batch
if len(img.shape) == 4:
nrows = nrows if nrows is not None else int(math.sqrt(img.size(0)))
img = img[idx] if idx >= 0 else torchvision.utils.make_grid(img, nrows)
img = img.cpu().float()
if unnormalize:
img += 1.0
img /= 2.0
image_numpy = img.numpy()
# image_numpy = np.transpose(image_numpy, (1, 2, 0))
image_numpy *= 255.0
return image_numpy.astype(imtype)
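# Usage sketch (added; `batch` is a hypothetical tensor of normalized images):
# >>> img = tensor2im(batch, idx=0)    # uint8 array scaled back to 0..255
# >>> grid = tensor2im(batch, idx=-1)  # idx < 0 builds a torchvision grid instead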
def tensor2maskim(mask, imtype=np.uint8, idx=0, nrows=1):
im = tensor2im(mask, imtype=imtype, idx=idx, unnormalize=False, nrows=nrows)
if im.shape[2] == 1:
im = np.repeat(im, 3, axis=-1)
return im
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
return paths
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
return path
def clear_dir(path):
import shutil
if os.path.exists(path):
shutil.rmtree(path)
return mkdir(path)
def save_image(image_numpy, image_path):
mkdir(os.path.dirname(image_path))
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def load_pickle_file(pkl_path):
with open(pkl_path, 'rb') as f:
data = pickle.load(f, encoding='latin1')
return data
def write_pickle_file(pkl_path, data_dict):
with open(pkl_path, 'wb') as fp:
pickle.dump(data_dict, fp, protocol=2)
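# Usage sketch (added; the file name and contents are illustrative): round-trip a
# dict through the pickle helpers.
# >>> write_pickle_file('params.pkl', {'pose': [0.0] * 72})
# >>> params = load_pickle_file('params.pkl')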
|
[
"pickle.dump",
"torchvision.transforms.functional.to_tensor",
"numpy.ones",
"pickle.load",
"shutil.rmtree",
"torch.nn.functional.pad",
"torch.ones",
"os.path.dirname",
"numpy.transpose",
"os.path.exists",
"torch.FloatTensor",
"numpy.max",
"torch.Tensor",
"cv2.resize",
"numpy.repeat",
"numpy.stack",
"torch.nn.functional.conv2d",
"utils.mesh.cvt_fim_enc",
"numpy.min",
"os.makedirs",
"numpy.zeros",
"torchvision.utils.make_grid",
"numpy.where",
"PIL.Image.fromarray"
] |
[((3155, 3188), 'numpy.zeros', 'np.zeros', (['(bs, 4)'], {'dtype': 'np.int32'}), '((bs, 4), dtype=np.int32)\n', (3163, 3188), True, 'import numpy as np\n'), ((3201, 3233), 'numpy.ones', 'np.ones', (['(bs,)'], {'dtype': 'np.float32'}), '((bs,), dtype=np.float32)\n', (3208, 3233), True, 'import numpy as np\n'), ((4872, 4912), 'numpy.transpose', 'np.transpose', (['fim_enc'], {'axes': '(0, 2, 3, 1)'}), '(fim_enc, axes=(0, 2, 3, 1))\n', (4884, 4912), True, 'import numpy as np\n'), ((5039, 5061), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (5047, 5061), True, 'import numpy as np\n'), ((6161, 6181), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6175, 6181), False, 'import os\n'), ((6333, 6361), 'PIL.Image.fromarray', 'Image.fromarray', (['image_numpy'], {}), '(image_numpy)\n', (6348, 6361), False, 'from PIL import Image\n'), ((1154, 1186), 'numpy.stack', 'np.stack', (['resized_images'], {'axis': '(0)'}), '(resized_images, axis=0)\n', (1162, 1186), True, 'import numpy as np\n'), ((1444, 1463), 'torchvision.transforms.functional.to_tensor', 'TF.to_tensor', (['image'], {}), '(image)\n', (1456, 1463), True, 'import torchvision.transforms.functional as TF\n'), ((2338, 2410), 'torch.ones', 'torch.ones', (['(1)', '(1)', 'ks', 'ks'], {'dtype': 'torch.float32', 'device': 'src_bg_mask.device'}), '(1, 1, ks, ks, dtype=torch.float32, device=src_bg_mask.device)\n', (2348, 2410), False, 'import torch\n'), ((2462, 2521), 'torch.nn.functional.pad', 'F.pad', (['src_bg_mask', '[pad_s, pad_s, pad_s, pad_s]'], {'value': '(1.0)'}), '(src_bg_mask, [pad_s, pad_s, pad_s, pad_s], value=1.0)\n', (2467, 2521), True, 'import torch.nn.functional as F\n'), ((2536, 2569), 'torch.nn.functional.conv2d', 'F.conv2d', (['src_bg_mask_pad', 'kernel'], {}), '(src_bg_mask_pad, kernel)\n', (2544, 2569), True, 'import torch.nn.functional as F\n'), ((2642, 2701), 'torch.nn.functional.pad', 'F.pad', (['src_bg_mask', '[pad_s, pad_s, pad_s, pad_s]'], {'value': '(0.0)'}), '(src_bg_mask, [pad_s, pad_s, pad_s, pad_s], value=0.0)\n', (2647, 2701), True, 'import torch.nn.functional as F\n'), ((2716, 2749), 'torch.nn.functional.conv2d', 'F.conv2d', (['src_bg_mask_pad', 'kernel'], {}), '(src_bg_mask_pad, kernel)\n', (2724, 2749), True, 'import torch.nn.functional as F\n'), ((3307, 3326), 'numpy.where', 'np.where', (['(mask == 1)'], {}), '(mask == 1)\n', (3315, 3326), True, 'import numpy as np\n'), ((3534, 3544), 'numpy.min', 'np.min', (['ys'], {}), '(ys)\n', (3540, 3544), True, 'import numpy as np\n'), ((3578, 3588), 'numpy.min', 'np.min', (['xs'], {}), '(xs)\n', (3584, 3588), True, 'import numpy as np\n'), ((3623, 3633), 'numpy.max', 'np.max', (['ys'], {}), '(ys)\n', (3629, 3633), True, 'import numpy as np\n'), ((3668, 3678), 'numpy.max', 'np.max', (['xs'], {}), '(xs)\n', (3674, 3678), True, 'import numpy as np\n'), ((4552, 4577), 'torch.FloatTensor', 'torch.FloatTensor', (['tensor'], {}), '(tensor)\n', (4569, 4577), False, 'import torch\n'), ((4968, 5001), 'utils.mesh.cvt_fim_enc', 'mesh.cvt_fim_enc', (['fim_i', 'map_name'], {}), '(fim_i, map_name)\n', (4984, 5001), True, 'import utils.mesh as mesh\n'), ((5795, 5820), 'numpy.repeat', 'np.repeat', (['im', '(3)'], {'axis': '(-1)'}), '(im, 3, axis=-1)\n', (5804, 5820), True, 'import numpy as np\n'), ((6049, 6069), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6063, 6069), False, 'import os\n'), ((6079, 6096), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (6090, 6096), False, 'import os\n'), ((6191, 6210), 
'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (6204, 6210), False, 'import shutil\n'), ((6288, 6315), 'os.path.dirname', 'os.path.dirname', (['image_path'], {}), '(image_path)\n', (6303, 6315), False, 'import os\n'), ((6478, 6511), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (6489, 6511), False, 'import pickle\n'), ((6620, 6658), 'pickle.dump', 'pickle.dump', (['data_dict', 'fp'], {'protocol': '(2)'}), '(data_dict, fp, protocol=2)\n', (6631, 6658), False, 'import pickle\n'), ((872, 927), 'cv2.resize', 'cv2.resize', (['image', '(self.output_size, self.output_size)'], {}), '(image, (self.output_size, self.output_size))\n', (882, 927), False, 'import cv2\n'), ((1055, 1085), 'numpy.transpose', 'np.transpose', (['image', '(2, 0, 1)'], {}), '(image, (2, 0, 1))\n', (1067, 1085), True, 'import numpy as np\n'), ((5337, 5376), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['img', 'nrows'], {}), '(img, nrows)\n', (5364, 5376), False, 'import torchvision\n'), ((1678, 1708), 'torch.Tensor', 'torch.Tensor', (["sample['images']"], {}), "(sample['images'])\n", (1690, 1708), False, 'import torch\n'), ((1743, 1772), 'torch.Tensor', 'torch.Tensor', (["sample['smpls']"], {}), "(sample['smpls'])\n", (1755, 1772), False, 'import torch\n'), ((1957, 1987), 'torch.Tensor', 'torch.Tensor', (["sample['images']"], {}), "(sample['images'])\n", (1969, 1987), False, 'import torch\n'), ((2021, 2049), 'torch.Tensor', 'torch.Tensor', (["sample['smpl']"], {}), "(sample['smpl'])\n", (2033, 2049), False, 'import torch\n'), ((2082, 2109), 'torch.Tensor', 'torch.Tensor', (["sample['uvs']"], {}), "(sample['uvs'])\n", (2094, 2109), False, 'import torch\n'), ((2143, 2171), 'torch.Tensor', 'torch.Tensor', (["sample['mask']"], {}), "(sample['mask'])\n", (2155, 2171), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/MedicinalProductUndesirableEffect
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
from typing import List as ListType
from pydantic import Field
from . import domainresource, fhirtypes
class MedicinalProductUndesirableEffect(domainresource.DomainResource):
    """Disclaimer: Any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; it is instead used to enable the Extensibility
    feature for FHIR Primitive Data Types.
MedicinalProductUndesirableEffect.
Describe the undesirable effects of the medicinal product.
"""
resource_type = Field("MedicinalProductUndesirableEffect", const=True)
classification: fhirtypes.CodeableConceptType = Field(
None,
alias="classification",
title="Classification of the effect",
description=None,
# if property is element of this resource.
element_property=True,
)
frequencyOfOccurrence: fhirtypes.CodeableConceptType = Field(
None,
alias="frequencyOfOccurrence",
title="The frequency of occurrence of the effect",
description=None,
# if property is element of this resource.
element_property=True,
)
population: ListType[fhirtypes.PopulationType] = Field(
None,
alias="population",
title="The population group to which this applies",
description=None,
# if property is element of this resource.
element_property=True,
)
subject: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="subject",
title="The medication for which this is an indication",
description=None,
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["MedicinalProduct", "Medication"],
)
symptomConditionEffect: fhirtypes.CodeableConceptType = Field(
None,
alias="symptomConditionEffect",
title="The symptom, condition or undesirable effect",
description=None,
# if property is element of this resource.
element_property=True,
)
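# Usage sketch (added; the field values are hypothetical): pydantic coerces plain
# dicts into the typed FHIR elements, so the resource can be built directly, e.g.
# >>> effect = MedicinalProductUndesirableEffect(
# ...     symptomConditionEffect={"text": "Nausea"},
# ...     frequencyOfOccurrence={"text": "Common"},
# ... )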
|
[
"pydantic.Field"
] |
[((701, 755), 'pydantic.Field', 'Field', (['"""MedicinalProductUndesirableEffect"""'], {'const': '(True)'}), "('MedicinalProductUndesirableEffect', const=True)\n", (706, 755), False, 'from pydantic import Field\n'), ((809, 927), 'pydantic.Field', 'Field', (['None'], {'alias': '"""classification"""', 'title': '"""Classification of the effect"""', 'description': 'None', 'element_property': '(True)'}), "(None, alias='classification', title='Classification of the effect',\n description=None, element_property=True)\n", (814, 927), False, 'from pydantic import Field\n'), ((1082, 1225), 'pydantic.Field', 'Field', (['None'], {'alias': '"""frequencyOfOccurrence"""', 'title': '"""The frequency of occurrence of the effect"""', 'description': 'None', 'element_property': '(True)'}), "(None, alias='frequencyOfOccurrence', title=\n 'The frequency of occurrence of the effect', description=None,\n element_property=True)\n", (1087, 1225), False, 'from pydantic import Field\n'), ((1369, 1502), 'pydantic.Field', 'Field', (['None'], {'alias': '"""population"""', 'title': '"""The population group to which this applies"""', 'description': 'None', 'element_property': '(True)'}), "(None, alias='population', title=\n 'The population group to which this applies', description=None,\n element_property=True)\n", (1374, 1502), False, 'from pydantic import Field\n'), ((1642, 1837), 'pydantic.Field', 'Field', (['None'], {'alias': '"""subject"""', 'title': '"""The medication for which this is an indication"""', 'description': 'None', 'element_property': '(True)', 'enum_reference_types': "['MedicinalProduct', 'Medication']"}), "(None, alias='subject', title=\n 'The medication for which this is an indication', description=None,\n element_property=True, enum_reference_types=['MedicinalProduct',\n 'Medication'])\n", (1647, 1837), False, 'from pydantic import Field\n'), ((2064, 2211), 'pydantic.Field', 'Field', (['None'], {'alias': '"""symptomConditionEffect"""', 'title': '"""The symptom, condition or undesirable effect"""', 'description': 'None', 'element_property': '(True)'}), "(None, alias='symptomConditionEffect', title=\n 'The symptom, condition or undesirable effect', description=None,\n element_property=True)\n", (2069, 2211), False, 'from pydantic import Field\n')]
|
from datetime import datetime
import boto3
from django.conf import settings
def invalidate_paths(path_list):
if not hasattr(settings, 'ZAPPA_STAGE'):
return
cloudfront = boto3.client('cloudfront')
cloudfront.create_invalidation(
DistributionId=settings.AWS_CLOUD_FRONT_ID,
InvalidationBatch={
'Paths': {
'Quantity': len(path_list),
'Items': path_list
},
'CallerReference': 'django-invalidate-{}'.format(datetime.now())
}
)
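# Usage sketch (added; the paths are illustrative): CloudFront invalidation paths
# must start with "/" and may end with a wildcard.
# >>> invalidate_paths(["/static/css/*", "/index.html"])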
|
[
"datetime.datetime.now",
"boto3.client"
] |
[((189, 215), 'boto3.client', 'boto3.client', (['"""cloudfront"""'], {}), "('cloudfront')\n", (201, 215), False, 'import boto3\n'), ((511, 525), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (523, 525), False, 'from datetime import datetime\n')]
|
from django.forms import ModelForm
from ftft.canzoni.models import canzone
from django import forms
class songform(ModelForm):
gruppo = forms.CharField()
genere = forms.CharField()
def __init__(self, *args, **kwargs):
super(songform, self).__init__(*args, **kwargs)
        # Give the 'moood' field a human-readable label
self.fields['moood'].label = 'Mood'
class Meta:
model = canzone
exclude = ["contenuto"]
|
[
"django.forms.CharField"
] |
[((141, 158), 'django.forms.CharField', 'forms.CharField', ([], {}), '()\n', (156, 158), False, 'from django import forms\n'), ((169, 186), 'django.forms.CharField', 'forms.CharField', ([], {}), '()\n', (184, 186), False, 'from django import forms\n')]
|
# modusite
# Copyright (c) 2006-2010 <NAME>
# http://modu.bubblehouse.org
#
#
from modu.persist import storable, sql
from modusite.model import release
class Project(storable.Storable):
def __init__(self):
super(Project, self).__init__('project')
def get_releases(self):
store = self.get_store()
store.ensure_factory('release', release.Release)
return store.load('release', dict(
project_id = self.get_id(),
nightly = sql.RAW('IFNULL(%s, 0) = 0'),
active = 1,
__order_by = "version_weight DESC",
))
def get_nightly(self):
store = self.get_store()
store.ensure_factory('release', release.Release)
return store.load_one('release', dict(
project_id = self.get_id(),
nightly = 1,
active = 1,
__order_by = "version_weight DESC",
))
|
[
"modu.persist.sql.RAW"
] |
[((439, 467), 'modu.persist.sql.RAW', 'sql.RAW', (['"""IFNULL(%s, 0) = 0"""'], {}), "('IFNULL(%s, 0) = 0')\n", (446, 467), False, 'from modu.persist import storable, sql\n')]
|
import random
from game.board import Board
def minimax(board: Board, depth, max_player):
if depth == 0 or board.winner() != 0:
# max_player = True <--- FOX
return evaluate(board), 0 # [(0,0), (0,0)]
moves = all_moves(board, max_player)
random.shuffle(moves)
if max_player:
value = -100
best_move = None
for move in moves:
board.move_piece(
board.get_piece(move[0][0], move[0][1]), move[1][0], move[1][1]
)
temp_value = minimax(board, depth - 1, False)[0]
# print(f"Temp: {temp_value}")
if temp_value > value:
value = temp_value
best_move = move
board.move_back(
board.get_piece(move[1][0], move[1][1]), move[0][0], move[0][1]
)
else:
value = 100
best_move = None
for move in moves:
board.move_piece(
board.get_piece(move[0][0], move[0][1]), move[1][0], move[1][1]
)
temp_value = minimax(board, depth - 1, True)[0]
if temp_value < value:
value = temp_value
best_move = move
board.move_back(
board.get_piece(move[1][0], move[1][1]), move[0][0], move[0][1]
)
return value, best_move
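# Usage sketch (added; `board` is assumed to be a game.board.Board instance):
# search three plies ahead for the fox, the maximising player.
# >>> value, best_move = minimax(board, 3, max_player=True)
# >>> best_move   # [(from_row, from_col), (to_row, to_col)]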
def evaluate(board):
    # check whether there already is a winner
    # + means the fox/wolf has won
    # - means the hounds/sheep have won
if board.winner() != 0:
return board.winner()
column_values = {0: -4, 1: -3, 2: -2, 3: -1, 4: 1, 5: 2, 6: 3, 7: 4}
column_q = 0
value = 0
fox = board.get_pieces()[0]
hounds = board.get_pieces()[1:]
if fox_behind_hound(board):
value += 0.2
# if fox_touching_hound(board):
# value -= 0.4
if all([h.row for h in hounds]):
value -= 0.4
# evaluate distance between fox and hounds
value -= 1 / sum(distance(fox, h) for h in hounds)
# evaluate fox row
value += 0.1 / fox.row
for h in hounds:
column_q += column_values[h.col]
# column distribution
col_distribution = abs(column_q) * 0.05
value += col_distribution
# # print(col_distribution)
minmax_condition = (max(h.row for h in hounds) - min(h.row for h in hounds)) * 0.05
value += minmax_condition
# print(minmax_condition)
# value += (max(h.row for h in hounds) - min(h.row for h in hounds))*0.1
# value += random.randint(-10, 10)*0.01
return value
def distance(fox, hound):
return (fox.row - hound.row) ** 2 + (fox.col - hound.col) ** 2
# return ((fox.row - hound.row)**2 + (fox.col - hound.col)**2)**(1/2)
# several conditions could be checked simultaneously in the for loop
def fox_behind_hound(board: Board):
behind = False
fox = board.get_pieces()[0]
hounds = board.get_pieces()[1:]
for h in hounds:
if fox.row < h.row:
behind = True
return behind
def fox_touching_hound(board):
touching = False
fox = board.get_pieces()[0]
hounds = board.get_pieces()[1:]
for h in hounds:
if fox.row + 1 == h.row:
if fox.col - 1 == h.col or fox.col + 1 == h.col:
touching = True
return touching
def all_moves(board: Board, max_player):
moves = []
    # from which square to which square
# [(1, 6), (1, 8)]
if max_player:
moves = moves_of_singe_piece(board, board.get_pieces()[0])
# print(f"m{moves}")
else:
pieces = board.get_pieces()[1:]
for p in pieces:
for m in moves_of_singe_piece(board, p):
moves.append(m)
# print(moves)
return moves
def moves_of_singe_piece(board, piece):
moves = []
current_pos = (piece.row, piece.col)
for move in board.get_valid_moves(piece):
moves.append([current_pos, move])
return moves
|
[
"random.shuffle"
] |
[((269, 290), 'random.shuffle', 'random.shuffle', (['moves'], {}), '(moves)\n', (283, 290), False, 'import random\n')]
|
import six
from tabulate import tabulate
__all__ = ["dict_to_string", "merge_as_list",
"ask_to_proceed_with_overwrite", "create_table"]
def create_table(small_dict):
"""
Create a small table using the keys of small_dict as headers. This is only
suitable for small dictionaries.
Args:
small_dict (dict): a result dictionary of only a few items.
Returns:
str: the table as a string.
"""
keys, values = tuple(zip(*small_dict.items()))
table = tabulate(
[values],
headers=keys,
tablefmt="pipe",
floatfmt=".3f",
stralign="center",
numalign="center",
)
return table
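# Usage sketch (added; the numbers are made up): a one-row results table.
# >>> print(create_table({"accuracy": 0.912, "loss": 0.034}))
# This renders a pipe-format table with "accuracy" and "loss" as headers and the
# values centered and formatted to three decimals.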
def dict_to_string(d, fmt="%.4f"):
s = ""
for k, v in d.items():
fmt_string = "%s: " + fmt + " - "
s += fmt_string % (k, v)
if d:
s = s[:-2]
return s
def merge_as_list(*args):
out = []
for x in args:
if x is not None:
if isinstance(x, (list, tuple)):
out += x
else:
out += [x]
return out
def ask_to_proceed_with_overwrite(filepath):
"""Produces a prompt asking about overwriting a file.
Parameters:
filepath: the path to the file to be overwritten.
Returns:
True if we can proceed with overwrite, False otherwise.
"""
overwrite = six.moves.input('[WARNING] %s already exists - overwrite? '
'[y/n]' % (filepath)).strip().lower()
while overwrite not in ('y', 'n'):
overwrite = six.moves.input('Enter "y" (overwrite) or "n" '
'(cancel).').strip().lower()
if overwrite == 'n':
return False
print('[TIP] Next time specify overwrite=True!')
return True
|
[
"tabulate.tabulate",
"six.moves.input"
] |
[((491, 599), 'tabulate.tabulate', 'tabulate', (['[values]'], {'headers': 'keys', 'tablefmt': '"""pipe"""', 'floatfmt': '""".3f"""', 'stralign': '"""center"""', 'numalign': '"""center"""'}), "([values], headers=keys, tablefmt='pipe', floatfmt='.3f', stralign=\n 'center', numalign='center')\n", (499, 599), False, 'from tabulate import tabulate\n'), ((1352, 1428), 'six.moves.input', 'six.moves.input', (["('[WARNING] %s already exists - overwrite? [y/n]' % filepath)"], {}), "('[WARNING] %s already exists - overwrite? [y/n]' % filepath)\n", (1367, 1428), False, 'import six\n'), ((1541, 1598), 'six.moves.input', 'six.moves.input', (['"""Enter "y" (overwrite) or "n" (cancel)."""'], {}), '(\'Enter "y" (overwrite) or "n" (cancel).\')\n', (1556, 1598), False, 'import six\n')]
|
# coding: utf-8
from __future__ import unicode_literals
import re
from functools import partial
alphabet_ru = {
'а': 'а',
'б': 'б6',
'в': 'в',
'г': 'г',
'д': 'д',
'е': 'е',
'ё': 'ё',
'ж': 'ж',
'з': 'з',
'и': 'и',
'й': 'й',
'к': 'к',
'л': 'л',
'м': 'м',
'н': 'н',
'о': 'о',
'п': 'п',
'р': 'р',
'с': 'с',
'т': 'т',
'у': 'у',
'ф': 'ф',
'х': 'х',
'ц': 'ц',
'ч': 'ч',
'ъ': 'ъ',
'ы': 'ы',
'ь': 'ь',
'э': 'э',
'ю': 'ю',
'я': 'я',
}
def variants_of_letter(alphabet, letter):
letters = alphabet.get(letter, letter)
return '|'.join(letters.split())
ru_variants_of_letter = partial(variants_of_letter, alphabet_ru)
def build_bad_phrase(*symbols, **kwargs):
"""
    Build a regular-expression phrase from the given symbols.
    Spaces or any non-Cyrillic characters may appear between the symbols.
    The phrase is returned as a capturing group.
"""
variants_func = kwargs.get('variants_func', ru_variants_of_letter)
separator = '(?:[^а-я])*' # non-capturing group
if len(symbols) == 1:
symbols = symbols[0].split()
symbol_regexp = []
for symbol in symbols:
if len(symbol) == 1:
symbol = [symbol]
parts = [variants_func(i) for i in symbol]
symbol_regexp.append('[{}]+'.format('|'.join(parts)))
return r'[а-я]*({})[а-я]*'.format(separator.join(symbol_regexp))
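# Usage sketch (added): for the letters "б а" the builder yields (roughly)
#   r'[а-я]*([б6]+(?:[^а-я])*[а]+)[а-я]*'
# i.e. a "б" (or the look-alike digit "6") followed by an "а", with any number of
# non-Cyrillic separators allowed in between.
# >>> pattern = build_bad_phrase('б а')
# >>> re.search(pattern, 'б-а') is not None   # True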
def build_good_phrase(*symbols):
if len(symbols) == 1:
symbols = symbols[0].split()
out = []
for symbol in symbols:
out.append('[{}]'.format(symbol))
return r'({})'.format(''.join(out))
|
[
"functools.partial"
] |
[((702, 742), 'functools.partial', 'partial', (['variants_of_letter', 'alphabet_ru'], {}), '(variants_of_letter, alphabet_ru)\n', (709, 742), False, 'from functools import partial\n')]
|