code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import json
import logging
import time
from pathlib import Path
from django.core.mail import EmailMultiAlternatives, get_connection
from django.core.management import BaseCommand
from slack_sdk import WebClient, errors
from database_locks import locked
from notifications.models import Notification, Subscription
from django.conf import settings
logger = logging.getLogger(__name__)
@locked
class Command(BaseCommand):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__sc = WebClient(settings.NOTIFICATIONS_SLACK_APP_TOKEN)
self.__slack_limited = time.time()
def add_arguments(self, parser):
parser.add_argument('-1', '--run-once', action='store_true', default=False, help='Run only one check')
def handle_tick(self):
for notification in Notification.objects.filter(status=Notification.STATUS_PENDING).select_related(
'subscription'
):
try:
if notification.subscription.service == Subscription.Service.SLACK:
if self.__slack_limited < time.time():
self.__send_slack_notifications(notification)
elif notification.subscription.service == Subscription.Service.MAIL:
self.__send_email_notifications(notification)
else:
notification.status = Notification.STATUS_ERROR
logger.error(
'notify failed - %d - bad service %s', notification.pk, notification.subscription.service
)
except Exception as e:
notification.status = Notification.STATUS_ERROR
notification.save()
logger.exception(e)
def handle(self, *args, **options):
"""
Main method that starts the infinite loop to fetch for pending notifications.
When those exists, then it calls its sub methods to send Slack and Email notifications according to their types.
"""
while True:
self.handle_tick()
if options['run_once']:
break
time.sleep(1)
def __send_slack_notifications(self, notification):
"""
Method responsible for handling slack notifications.
Provided a single notifications that still is in pending state and that its subscription type is slack,
then the method sends it using the slackclient module.
If the notification is sent successfully then it updates its Status field to to Status_SENT. Otherwise,
the Notification's Status property is changed to STATUS_ERROR.
:param notification: Set of notifications with SLACK subscription and PENDING status.
"""
try:
self.__sc.chat_postMessage(
# text still required for message preview (in notifications)
text=notification.message,
channel=notification.target,
**notification.slack_options,
)
notification.status = Notification.STATUS_SENT
notification.save(update_fields=['status'])
except errors.SlackApiError as e:
if e.response.get('error') == 'ratelimited':
# handle rate limit
try:
retry_after = int(e.response.headers.get('retry-after')) + 5
except (ValueError, TypeError):
# if no header (weird), wait 15s
retry_after = 15
self.__slack_limited = time.time() + retry_after
logger.warning('rate limited on %d - waiting %d secs', notification.pk, retry_after)
else:
notification.status = Notification.STATUS_ERROR
logger.error('notify failed - %d - %s', notification.pk, e.response.get('error'))
notification.save(update_fields=['status'])
def __send_email_notifications(self, notification):
"""
Method responsible for sending email notifications.
Provided a single notification that still is in pending state and that is subscription type mail,
then the method sends it to the list of targets using the Django.Core.Email dependency.
If the email is sent successfully, then the Notification's status is changed to STATUS_SENT. Otherwise,
the Notification's status property is changed to STATUS_ERROR.
:param notification: Single notification of MAIL subscription with PENDING status.
"""
email_args = json.loads(notification.options)
with get_connection() as connection:
msg = EmailMultiAlternatives(
subject=email_args.get('subject'),
body=notification.message,
from_email=email_args.get('from_email'),
to=json.loads(notification.target),
reply_to=email_args.get('reply_to'),
connection=connection,
)
if email_args.get("attachments"):
MEDIA_ROOT = Path(settings.MEDIA_ROOT).resolve()
for attach in email_args.get("attachments"):
path = Path(attach[1]).resolve()
try:
# check if path is relative to MEDIA_ROOT
path.relative_to(MEDIA_ROOT)
except ValueError:
logger.error('invalid path for attachment: %s', path)
continue
if path.exists() and not path.is_dir():
with path.open() as attachment:
msg.attach(attach[0], attachment.read(), attach[2])
else:
logger.error('could not open file from path: %s', path)
if email_args.get('html_message'):
msg.attach_alternative(email_args.get('html_message'), 'text/html')
msg.send()
notification.status = Notification.STATUS_SENT
notification.save(update_fields=['status'])
| [
"logging.getLogger",
"json.loads",
"pathlib.Path",
"time.sleep",
"notifications.models.Notification.objects.filter",
"slack_sdk.WebClient",
"django.core.mail.get_connection",
"time.time"
] | [((358, 385), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (375, 385), False, 'import logging\n'), ((527, 576), 'slack_sdk.WebClient', 'WebClient', (['settings.NOTIFICATIONS_SLACK_APP_TOKEN'], {}), '(settings.NOTIFICATIONS_SLACK_APP_TOKEN)\n', (536, 576), False, 'from slack_sdk import WebClient, errors\n'), ((608, 619), 'time.time', 'time.time', ([], {}), '()\n', (617, 619), False, 'import time\n'), ((4568, 4600), 'json.loads', 'json.loads', (['notification.options'], {}), '(notification.options)\n', (4578, 4600), False, 'import json\n'), ((2148, 2161), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2158, 2161), False, 'import time\n'), ((4614, 4630), 'django.core.mail.get_connection', 'get_connection', ([], {}), '()\n', (4628, 4630), False, 'from django.core.mail import EmailMultiAlternatives, get_connection\n'), ((825, 888), 'notifications.models.Notification.objects.filter', 'Notification.objects.filter', ([], {'status': 'Notification.STATUS_PENDING'}), '(status=Notification.STATUS_PENDING)\n', (852, 888), False, 'from notifications.models import Notification, Subscription\n'), ((4858, 4889), 'json.loads', 'json.loads', (['notification.target'], {}), '(notification.target)\n', (4868, 4889), False, 'import json\n'), ((1090, 1101), 'time.time', 'time.time', ([], {}), '()\n', (1099, 1101), False, 'import time\n'), ((3563, 3574), 'time.time', 'time.time', ([], {}), '()\n', (3572, 3574), False, 'import time\n'), ((5072, 5097), 'pathlib.Path', 'Path', (['settings.MEDIA_ROOT'], {}), '(settings.MEDIA_ROOT)\n', (5076, 5097), False, 'from pathlib import Path\n'), ((5196, 5211), 'pathlib.Path', 'Path', (['attach[1]'], {}), '(attach[1])\n', (5200, 5211), False, 'from pathlib import Path\n')] |
# -*- coding: utf-8 -*-
from openre.agent.decorators import action
import logging
from openre.agent.client.helpers import Net
@action(namespace='client')
def deploy(agent):
logging.debug('Run Net')
config = agent.net_config
if not config:
raise ValueError('No net config')
net = None
try:
logging.info('Creating Net')
net = Net(config)
net.create()
net.upload_config()
net.deploy_domains()
net.deploy_layers()
net.deploy_neurons()
net.pre_deploy_synapses()
logging.info('Start creating neurons and synapses.' \
' This may take a while.')
net.deploy_synapses()
logging.info('Upload data to devices')
net.post_deploy_synapses()
net.post_deploy()
logging.info('Deploy done')
except:
if net:
logging.info('Destroying Net')
net.destroy()
net.clean()
raise
| [
"logging.info",
"logging.debug",
"openre.agent.decorators.action",
"openre.agent.client.helpers.Net"
] | [((129, 155), 'openre.agent.decorators.action', 'action', ([], {'namespace': '"""client"""'}), "(namespace='client')\n", (135, 155), False, 'from openre.agent.decorators import action\n'), ((179, 203), 'logging.debug', 'logging.debug', (['"""Run Net"""'], {}), "('Run Net')\n", (192, 203), False, 'import logging\n'), ((327, 355), 'logging.info', 'logging.info', (['"""Creating Net"""'], {}), "('Creating Net')\n", (339, 355), False, 'import logging\n'), ((370, 381), 'openre.agent.client.helpers.Net', 'Net', (['config'], {}), '(config)\n', (373, 381), False, 'from openre.agent.client.helpers import Net\n'), ((559, 634), 'logging.info', 'logging.info', (['"""Start creating neurons and synapses. This may take a while."""'], {}), "('Start creating neurons and synapses. This may take a while.')\n", (571, 634), False, 'import logging\n'), ((699, 737), 'logging.info', 'logging.info', (['"""Upload data to devices"""'], {}), "('Upload data to devices')\n", (711, 737), False, 'import logging\n'), ((807, 834), 'logging.info', 'logging.info', (['"""Deploy done"""'], {}), "('Deploy done')\n", (819, 834), False, 'import logging\n'), ((875, 905), 'logging.info', 'logging.info', (['"""Destroying Net"""'], {}), "('Destroying Net')\n", (887, 905), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
"""Hello module."""
import platform
import sys
def get_hello():
system = platform.system()
py_version = sys.version_info.major
if system == "Windows":
if py_version < 3:
return "Hello Windows, I'm Python2 or earlier!"
else:
return "Hello Windows, I'm Python3 or later!"
elif system == "Darwin":
if py_version < 3:
return "Hello Mac OSX, I'm Python2 or earlier!"
else:
return "Hello Mac OSX, I'm Python3 or later!"
else:
if py_version < 3:
return "Hello {}, I'm Python2 or earlier!".format(system)
else:
return "Hello {}, I'm Python3 or later!".format(system)
| [
"platform.system"
] | [((105, 122), 'platform.system', 'platform.system', ([], {}), '()\n', (120, 122), False, 'import platform\n')] |
import numpy
import pytest
import orthopy
import quadpy
from helpers import check_degree_ortho
schemes = [
quadpy.e2r2.haegemans_piessens_a(),
quadpy.e2r2.haegemans_piessens_b(),
quadpy.e2r2.rabinowitz_richter_1(),
quadpy.e2r2.rabinowitz_richter_2(),
quadpy.e2r2.rabinowitz_richter_3(),
quadpy.e2r2.rabinowitz_richter_4(),
quadpy.e2r2.rabinowitz_richter_5(),
quadpy.e2r2.stroud_4_1(),
quadpy.e2r2.stroud_5_1(),
quadpy.e2r2.stroud_5_2(),
quadpy.e2r2.stroud_7_1(),
quadpy.e2r2.stroud_7_2(),
quadpy.e2r2.stroud_9_1(),
quadpy.e2r2.stroud_11_1(),
quadpy.e2r2.stroud_11_2(),
quadpy.e2r2.stroud_13_1(),
quadpy.e2r2.stroud_15_1(),
quadpy.e2r2.stroud_secrest_5(),
quadpy.e2r2.stroud_secrest_6(),
]
@pytest.mark.parametrize("scheme", schemes)
def test_scheme(scheme, tol=1.0e-14):
assert scheme.points.dtype == numpy.float64, scheme.name
assert scheme.weights.dtype == numpy.float64, scheme.name
# degree = check_degree(
# lambda poly: scheme.integrate(poly),
# integrate_monomial_over_enr2,
# 2,
# scheme.degree + 1,
# tol=tol,
# )
# assert degree == scheme.degree, "{} Observed: {} expected: {}".format(
# scheme.name, degree, scheme.degree
# )
def eval_orthopolys(x):
return numpy.concatenate(
orthopy.e2r2.tree(x, scheme.degree + 1, symbolic=False)
)
vals = scheme.integrate(eval_orthopolys)
# Put vals back into the tree structure:
# len(approximate[k]) == k+1
approximate = [
vals[k * (k + 1) // 2 : (k + 1) * (k + 2) // 2]
for k in range(scheme.degree + 2)
]
exact = [numpy.zeros(k + 1) for k in range(scheme.degree + 2)]
exact[0][0] = numpy.sqrt(numpy.pi)
degree = check_degree_ortho(approximate, exact, abs_tol=tol)
assert degree >= scheme.degree, "{} -- Observed: {}, expected: {}".format(
scheme.name, degree, scheme.degree
)
return
@pytest.mark.parametrize("scheme", [quadpy.e2r2.rabinowitz_richter_1()])
def test_show(scheme):
scheme.show()
return
if __name__ == "__main__":
# scheme_ = quadpy.e2r2.Stroud["7-2"]()
# test_scheme(scheme_, 1.0e-14)
# test_show(scheme_)
from helpers import find_equal
find_equal(schemes)
| [
"quadpy.e2r2.rabinowitz_richter_3",
"quadpy.e2r2.rabinowitz_richter_4",
"quadpy.e2r2.stroud_15_1",
"numpy.sqrt",
"quadpy.e2r2.haegemans_piessens_b",
"quadpy.e2r2.rabinowitz_richter_1",
"quadpy.e2r2.stroud_4_1",
"quadpy.e2r2.rabinowitz_richter_2",
"quadpy.e2r2.rabinowitz_richter_5",
"quadpy.e2r2.ha... | [((770, 812), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""scheme"""', 'schemes'], {}), "('scheme', schemes)\n", (793, 812), False, 'import pytest\n'), ((113, 147), 'quadpy.e2r2.haegemans_piessens_a', 'quadpy.e2r2.haegemans_piessens_a', ([], {}), '()\n', (145, 147), False, 'import quadpy\n'), ((153, 187), 'quadpy.e2r2.haegemans_piessens_b', 'quadpy.e2r2.haegemans_piessens_b', ([], {}), '()\n', (185, 187), False, 'import quadpy\n'), ((193, 227), 'quadpy.e2r2.rabinowitz_richter_1', 'quadpy.e2r2.rabinowitz_richter_1', ([], {}), '()\n', (225, 227), False, 'import quadpy\n'), ((233, 267), 'quadpy.e2r2.rabinowitz_richter_2', 'quadpy.e2r2.rabinowitz_richter_2', ([], {}), '()\n', (265, 267), False, 'import quadpy\n'), ((273, 307), 'quadpy.e2r2.rabinowitz_richter_3', 'quadpy.e2r2.rabinowitz_richter_3', ([], {}), '()\n', (305, 307), False, 'import quadpy\n'), ((313, 347), 'quadpy.e2r2.rabinowitz_richter_4', 'quadpy.e2r2.rabinowitz_richter_4', ([], {}), '()\n', (345, 347), False, 'import quadpy\n'), ((353, 387), 'quadpy.e2r2.rabinowitz_richter_5', 'quadpy.e2r2.rabinowitz_richter_5', ([], {}), '()\n', (385, 387), False, 'import quadpy\n'), ((393, 417), 'quadpy.e2r2.stroud_4_1', 'quadpy.e2r2.stroud_4_1', ([], {}), '()\n', (415, 417), False, 'import quadpy\n'), ((423, 447), 'quadpy.e2r2.stroud_5_1', 'quadpy.e2r2.stroud_5_1', ([], {}), '()\n', (445, 447), False, 'import quadpy\n'), ((453, 477), 'quadpy.e2r2.stroud_5_2', 'quadpy.e2r2.stroud_5_2', ([], {}), '()\n', (475, 477), False, 'import quadpy\n'), ((483, 507), 'quadpy.e2r2.stroud_7_1', 'quadpy.e2r2.stroud_7_1', ([], {}), '()\n', (505, 507), False, 'import quadpy\n'), ((513, 537), 'quadpy.e2r2.stroud_7_2', 'quadpy.e2r2.stroud_7_2', ([], {}), '()\n', (535, 537), False, 'import quadpy\n'), ((543, 567), 'quadpy.e2r2.stroud_9_1', 'quadpy.e2r2.stroud_9_1', ([], {}), '()\n', (565, 567), False, 'import quadpy\n'), ((573, 598), 'quadpy.e2r2.stroud_11_1', 'quadpy.e2r2.stroud_11_1', ([], {}), '()\n', 
(596, 598), False, 'import quadpy\n'), ((604, 629), 'quadpy.e2r2.stroud_11_2', 'quadpy.e2r2.stroud_11_2', ([], {}), '()\n', (627, 629), False, 'import quadpy\n'), ((635, 660), 'quadpy.e2r2.stroud_13_1', 'quadpy.e2r2.stroud_13_1', ([], {}), '()\n', (658, 660), False, 'import quadpy\n'), ((666, 691), 'quadpy.e2r2.stroud_15_1', 'quadpy.e2r2.stroud_15_1', ([], {}), '()\n', (689, 691), False, 'import quadpy\n'), ((697, 727), 'quadpy.e2r2.stroud_secrest_5', 'quadpy.e2r2.stroud_secrest_5', ([], {}), '()\n', (725, 727), False, 'import quadpy\n'), ((733, 763), 'quadpy.e2r2.stroud_secrest_6', 'quadpy.e2r2.stroud_secrest_6', ([], {}), '()\n', (761, 763), False, 'import quadpy\n'), ((1770, 1790), 'numpy.sqrt', 'numpy.sqrt', (['numpy.pi'], {}), '(numpy.pi)\n', (1780, 1790), False, 'import numpy\n'), ((1805, 1856), 'helpers.check_degree_ortho', 'check_degree_ortho', (['approximate', 'exact'], {'abs_tol': 'tol'}), '(approximate, exact, abs_tol=tol)\n', (1823, 1856), False, 'from helpers import check_degree_ortho\n'), ((2298, 2317), 'helpers.find_equal', 'find_equal', (['schemes'], {}), '(schemes)\n', (2308, 2317), False, 'from helpers import find_equal\n'), ((1698, 1716), 'numpy.zeros', 'numpy.zeros', (['(k + 1)'], {}), '(k + 1)\n', (1709, 1716), False, 'import numpy\n'), ((2035, 2069), 'quadpy.e2r2.rabinowitz_richter_1', 'quadpy.e2r2.rabinowitz_richter_1', ([], {}), '()\n', (2067, 2069), False, 'import quadpy\n'), ((1370, 1425), 'orthopy.e2r2.tree', 'orthopy.e2r2.tree', (['x', '(scheme.degree + 1)'], {'symbolic': '(False)'}), '(x, scheme.degree + 1, symbolic=False)\n', (1387, 1425), False, 'import orthopy\n')] |
import unittest
from lcis import find_lcis_length
class LCISTests(unittest.TestCase):
"""Tests for longest continuous increasing subsequence challenge."""
def test_case_1(self):
self.assertEqual(find_lcis_length([1, 3, 5, 4, 7]), 3)
def test_case_2(self):
self.assertEqual(find_lcis_length([2, 2, 2, 2, 2]), 1)
def test_case_3(self):
self.assertEqual(find_lcis_length([1, 3, 5, 7]), 4)
def test_case_4(self):
self.assertEqual(find_lcis_length([1, 3, 5, 4, 2, 3, 4, 5]), 4)
def test_case_5(self):
self.assertEqual(find_lcis_length([2, 1, 3]), 2)
def test_case_6(self):
self.assertEqual(find_lcis_length([-1, -3, -5, -4, -7]), 2)
if __name__ == "__main__":
unittest.main(verbosity=2)
| [
"unittest.main",
"lcis.find_lcis_length"
] | [((747, 773), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (760, 773), False, 'import unittest\n'), ((216, 249), 'lcis.find_lcis_length', 'find_lcis_length', (['[1, 3, 5, 4, 7]'], {}), '([1, 3, 5, 4, 7])\n', (232, 249), False, 'from lcis import find_lcis_length\n'), ((307, 340), 'lcis.find_lcis_length', 'find_lcis_length', (['[2, 2, 2, 2, 2]'], {}), '([2, 2, 2, 2, 2])\n', (323, 340), False, 'from lcis import find_lcis_length\n'), ((398, 428), 'lcis.find_lcis_length', 'find_lcis_length', (['[1, 3, 5, 7]'], {}), '([1, 3, 5, 7])\n', (414, 428), False, 'from lcis import find_lcis_length\n'), ((486, 528), 'lcis.find_lcis_length', 'find_lcis_length', (['[1, 3, 5, 4, 2, 3, 4, 5]'], {}), '([1, 3, 5, 4, 2, 3, 4, 5])\n', (502, 528), False, 'from lcis import find_lcis_length\n'), ((586, 613), 'lcis.find_lcis_length', 'find_lcis_length', (['[2, 1, 3]'], {}), '([2, 1, 3])\n', (602, 613), False, 'from lcis import find_lcis_length\n'), ((671, 709), 'lcis.find_lcis_length', 'find_lcis_length', (['[-1, -3, -5, -4, -7]'], {}), '([-1, -3, -5, -4, -7])\n', (687, 709), False, 'from lcis import find_lcis_length\n')] |
import snap
Graph = snap.GenFull(snap.PNEANet, 10)
Src = 1
Dst = 2
EI = Graph.GetEI(Src,Dst)
EId = EI.GetId()
print(EId, Graph.GetEI(Src,Dst).GetId())
print(Graph.GetEI(Src,Dst).GetSrcNId(), Graph.GetEI(Src,Dst).GetDstNId())
print(Graph.GetEI(EId).GetSrcNId(), Graph.GetEI(EId).GetDstNId())
if EId != Graph.GetEI(Src,Dst).GetId():
print("*** error1")
if Graph.GetEI(Src,Dst).GetSrcNId() != Graph.GetEI(EId).GetSrcNId():
print("*** error2")
if Graph.GetEI(Src,Dst).GetDstNId() != Graph.GetEI(EId).GetDstNId():
print("*** error3")
| [
"snap.GenFull"
] | [((20, 50), 'snap.GenFull', 'snap.GenFull', (['snap.PNEANet', '(10)'], {}), '(snap.PNEANet, 10)\n', (32, 50), False, 'import snap\n')] |
from floodsystem.station import MonitoringStation
from floodsystem.geo import rivers_by_station_number
def test_rivers_by_station_number():
"""Test for Task1E functions"""
#create 4 test stations
station_id = "Test station_id"
measure_id = "Test measure_id"
label = "Test station"
coord = (0.0, 0.0)
typical_range = (0.0, 1.0)
town = "Test Town"
station1 = MonitoringStation(station_id, measure_id, label, coord, typical_range, "River A", town)
station2 = MonitoringStation(station_id, measure_id, label, coord, typical_range, "River A", town)
station3 = MonitoringStation(station_id, measure_id, label, coord, typical_range, "River B", town)
station4 = MonitoringStation(station_id, measure_id, label, coord, typical_range, "River C", town)
x = rivers_by_station_number([station1, station2, station3, station4], 1)
y = rivers_by_station_number([station1, station2, station3, station4], 2)
#test for N = 1, return most number of stations: A with 2
assert x == [("River A", 2)]
#test for N = 2, return most number of stations: A, B and C since they have the same number of stations
assert y == [("River A", 2), ("River B", 1), ("River C", 1)]
| [
"floodsystem.geo.rivers_by_station_number",
"floodsystem.station.MonitoringStation"
] | [((395, 486), 'floodsystem.station.MonitoringStation', 'MonitoringStation', (['station_id', 'measure_id', 'label', 'coord', 'typical_range', '"""River A"""', 'town'], {}), "(station_id, measure_id, label, coord, typical_range,\n 'River A', town)\n", (412, 486), False, 'from floodsystem.station import MonitoringStation\n'), ((498, 589), 'floodsystem.station.MonitoringStation', 'MonitoringStation', (['station_id', 'measure_id', 'label', 'coord', 'typical_range', '"""River A"""', 'town'], {}), "(station_id, measure_id, label, coord, typical_range,\n 'River A', town)\n", (515, 589), False, 'from floodsystem.station import MonitoringStation\n'), ((601, 692), 'floodsystem.station.MonitoringStation', 'MonitoringStation', (['station_id', 'measure_id', 'label', 'coord', 'typical_range', '"""River B"""', 'town'], {}), "(station_id, measure_id, label, coord, typical_range,\n 'River B', town)\n", (618, 692), False, 'from floodsystem.station import MonitoringStation\n'), ((704, 795), 'floodsystem.station.MonitoringStation', 'MonitoringStation', (['station_id', 'measure_id', 'label', 'coord', 'typical_range', '"""River C"""', 'town'], {}), "(station_id, measure_id, label, coord, typical_range,\n 'River C', town)\n", (721, 795), False, 'from floodsystem.station import MonitoringStation\n'), ((801, 870), 'floodsystem.geo.rivers_by_station_number', 'rivers_by_station_number', (['[station1, station2, station3, station4]', '(1)'], {}), '([station1, station2, station3, station4], 1)\n', (825, 870), False, 'from floodsystem.geo import rivers_by_station_number\n'), ((879, 948), 'floodsystem.geo.rivers_by_station_number', 'rivers_by_station_number', (['[station1, station2, station3, station4]', '(2)'], {}), '([station1, station2, station3, station4], 2)\n', (903, 948), False, 'from floodsystem.geo import rivers_by_station_number\n')] |
import logging
import os
from typing import Dict, Optional, List
import libinfinitton
from . import tasks
class Screen:
logger = logging.getLogger(__name__)
def __init__(self, task_list: Dict[str, tasks.BaseTask], name: str, config: dict = None):
self._task_list = task_list
self.name = name
self._keys: List[Optional[tasks.BaseTask]] = [None] * 15
if config is not None:
for key_index in range(15):
if 'key_' + str(key_index) in config:
task_name = config['key_' + str(key_index)]
self.set_task(key_index, task_name)
def __get_name(self):
return self.__name
def __set_name(self, name: str):
self.__name = name
name = property(__get_name, __set_name)
def set_task(self, key_index: int, task_name: str):
if task_name in self._task_list:
self._keys[key_index] = self._task_list[task_name.lower()]
else:
Screen.logger.warning('Unknown task: ' + task_name)
def get_key_task(self, key_index: int) -> Optional[tasks.BaseTask]:
return self._keys[key_index]
def get_config(self) -> Dict[str, str]:
config = {}
for key_index in range(15):
if self._keys[key_index] is not None:
config['key_' + str(key_index)] = self._keys[key_index].name
return config
def show(self, device: libinfinitton.Infinitton, config_path: str):
for key_index in range(15):
self.show_key(device, config_path, key_index)
def show_key(self, device: libinfinitton.Infinitton, config_path: str, key_index: int):
if self._keys[key_index] is None:
device.fill_color(key_index, 0, 0, 0)
else:
icon = self._keys[key_index].icon
if icon is None or icon == '':
Screen.logger.warning('No icon defined for action: %s of type %s' % (
self._keys[key_index].name, type(self._keys[key_index]).__name__))
device.fill_color(key_index, 255, 255, 255)
else:
if os.path.exists(icon) and os.path.isfile(icon):
# 1: absolute path: file exists at path
Screen.logger.debug('Icon for action is on absolute path: ' + self._keys[key_index].name)
device.fill_image_path(key_index, icon)
elif os.path.exists(os.path.join(config_path, icon)) and os.path.isfile(
os.path.join(config_path, icon)):
# 2: in config directory
Screen.logger.debug('Icon for action is in the config directory: ' + self._keys[key_index].name)
device.fill_image_path(key_index, os.path.join(config_path, icon))
elif os.path.exists(os.path.join(os.getcwd(), 'resources', 'icons', icon)) and os.path.isfile(
os.path.join(os.getcwd(), 'resources', 'icons', icon)):
# 3: in application resources
Screen.logger.debug('Icon for action is in application icon resources: ' + self._keys[key_index].name)
device.fill_image_path(key_index, os.path.join(os.getcwd(), 'resources', 'icons', icon))
else:
# Fallback: icon not found
Screen.logger.warning('Icon not found for action: %s: %s' % (self._keys[key_index].name, icon))
device.fill_color(key_index, 255, 255, 255)
def execute_key(self, key_index):
Screen.logger.info('Screen: %s executes task %s' % (self.name, key_index))
if self._keys[key_index] is not None:
self._keys[key_index].execute()
| [
"logging.getLogger",
"os.path.exists",
"os.path.join",
"os.getcwd",
"os.path.isfile"
] | [((138, 165), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (155, 165), False, 'import logging\n'), ((2129, 2149), 'os.path.exists', 'os.path.exists', (['icon'], {}), '(icon)\n', (2143, 2149), False, 'import os\n'), ((2154, 2174), 'os.path.isfile', 'os.path.isfile', (['icon'], {}), '(icon)\n', (2168, 2174), False, 'import os\n'), ((2442, 2473), 'os.path.join', 'os.path.join', (['config_path', 'icon'], {}), '(config_path, icon)\n', (2454, 2473), False, 'import os\n'), ((2519, 2550), 'os.path.join', 'os.path.join', (['config_path', 'icon'], {}), '(config_path, icon)\n', (2531, 2550), False, 'import os\n'), ((2769, 2800), 'os.path.join', 'os.path.join', (['config_path', 'icon'], {}), '(config_path, icon)\n', (2781, 2800), False, 'import os\n'), ((2851, 2862), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2860, 2862), False, 'import os\n'), ((2950, 2961), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2959, 2961), False, 'import os\n'), ((3233, 3244), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3242, 3244), False, 'import os\n')] |
import unittest
from automon.integrations.elasticsearch.config import ElasticsearchConfig, SnapshotBot, JVMBot
from automon.integrations.elasticsearch.client import ElasticsearchClient
from automon.integrations.elasticsearch.cleanup import Cleanup
from automon.integrations.elasticsearch.metrics import Metric, MetricTimestamp, Cluster
class Elasticsearch(unittest.TestCase):
e = ElasticsearchClient()
def test_create_document(self):
es = ElasticsearchClient()
from datetime import datetime
doc = {
'timestamp': datetime.now(),
'yohji': 'yamamoto'
}
if self.e.connected():
self.assertTrue(es.create_document(doc=doc))
self.assertTrue(es.search_summary())
def test_ElasticsearchClient(self):
e = ElasticsearchClient()
if e.connected():
self.assertTrue(e)
self.assertTrue(ElasticsearchClient)
self.assertTrue(e.ping())
self.assertTrue(e.get_indices())
self.assertFalse(e.delete_index(None))
self.assertFalse(e.search_indices(None))
else:
self.assertFalse(e.ping())
self.assertFalse(e.delete_index(None))
self.assertFalse(e.search_indices(None))
self.assertFalse(e.get_indices())
def test_Cleanup(self):
if self.e.connected():
self.assertTrue(Cleanup().get_indices())
self.assertFalse(Cleanup().search_indices(None))
# self.assertFalse(Cleanup().delete_indices(None))
else:
self.assertFalse(Cleanup().get_indices())
self.assertFalse(Cleanup().search_indices(None))
# self.assertFalse(Cleanup().delete_indices(f''))
def test_ElasticsearchConfig(self):
self.assertTrue(ElasticsearchConfig())
self.assertEqual(ElasticsearchConfig(), ElasticsearchConfig())
def test_SnapshotBot(self):
self.assertTrue(SnapshotBot())
def test_JVMBot(self):
self.assertTrue(JVMBot())
def test_Cluster(self):
self.assertTrue(Cluster)
def test_MetricTimestamp(self):
# metric = Metric()
self.assertTrue(MetricTimestamp)
# self.assertTrue(MetricTimestamp(metric)._time_now())
def test_Metric(self):
node = None
jvm = {
"timestamp": 1571330469200,
"uptime_in_millis": 12251148809,
"mem": {
"heap_used_in_bytes": 27551308288,
"heap_used_percent": 82,
"heap_committed_in_bytes": 33216266240,
"heap_max_in_bytes": 33216266240,
"non_heap_used_in_bytes": 162665664,
"non_heap_committed_in_bytes": 204083200,
"pools": {
"young": {
"used_in_bytes": 130099216,
"max_in_bytes": 558432256,
"peak_used_in_bytes": 558432256,
"peak_max_in_bytes": 558432256
},
"survivor": {
"used_in_bytes": 1132144,
"max_in_bytes": 69730304,
"peak_used_in_bytes": 69730304,
"peak_max_in_bytes": 69730304
},
"old": {
"used_in_bytes": 27420076928,
"max_in_bytes": 32588103680,
"peak_used_in_bytes": 30895029472,
"peak_max_in_bytes": 32588103680
}
}
},
"threading": {
"count": 141,
"peak_count": 223
},
"gc": {
"collectors": {
"young": {
"collection_count": 533686,
"collection_time_in_millis": 37099480
},
"old": {
"collection_count": 75872,
"collection_time_in_millis": 9588732
}
}
},
"buffer_pools": {
"mapped": {
"count": 7988,
"used_in_bytes": 3715149748692,
"total_capacity_in_bytes": 3715149748692
},
"direct": {
"count": 10146,
"used_in_bytes": 166764364,
"total_capacity_in_bytes": 166764363
}
},
"classes": {
"current_loaded_count": 17808,
"total_loaded_count": 18183,
"total_unloaded_count": 375
}
}
self.assertTrue(Metric)
self.assertRaises(TypeError, Metric)
if __name__ == '__main__':
unittest.main()
| [
"automon.integrations.elasticsearch.config.ElasticsearchConfig",
"automon.integrations.elasticsearch.client.ElasticsearchClient",
"automon.integrations.elasticsearch.cleanup.Cleanup",
"datetime.datetime.now",
"automon.integrations.elasticsearch.config.JVMBot",
"automon.integrations.elasticsearch.config.Sn... | [((387, 408), 'automon.integrations.elasticsearch.client.ElasticsearchClient', 'ElasticsearchClient', ([], {}), '()\n', (406, 408), False, 'from automon.integrations.elasticsearch.client import ElasticsearchClient\n'), ((4876, 4891), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4889, 4891), False, 'import unittest\n'), ((459, 480), 'automon.integrations.elasticsearch.client.ElasticsearchClient', 'ElasticsearchClient', ([], {}), '()\n', (478, 480), False, 'from automon.integrations.elasticsearch.client import ElasticsearchClient\n'), ((812, 833), 'automon.integrations.elasticsearch.client.ElasticsearchClient', 'ElasticsearchClient', ([], {}), '()\n', (831, 833), False, 'from automon.integrations.elasticsearch.client import ElasticsearchClient\n'), ((562, 576), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (574, 576), False, 'from datetime import datetime\n'), ((1824, 1845), 'automon.integrations.elasticsearch.config.ElasticsearchConfig', 'ElasticsearchConfig', ([], {}), '()\n', (1843, 1845), False, 'from automon.integrations.elasticsearch.config import ElasticsearchConfig, SnapshotBot, JVMBot\n'), ((1872, 1893), 'automon.integrations.elasticsearch.config.ElasticsearchConfig', 'ElasticsearchConfig', ([], {}), '()\n', (1891, 1893), False, 'from automon.integrations.elasticsearch.config import ElasticsearchConfig, SnapshotBot, JVMBot\n'), ((1895, 1916), 'automon.integrations.elasticsearch.config.ElasticsearchConfig', 'ElasticsearchConfig', ([], {}), '()\n', (1914, 1916), False, 'from automon.integrations.elasticsearch.config import ElasticsearchConfig, SnapshotBot, JVMBot\n'), ((1975, 1988), 'automon.integrations.elasticsearch.config.SnapshotBot', 'SnapshotBot', ([], {}), '()\n', (1986, 1988), False, 'from automon.integrations.elasticsearch.config import ElasticsearchConfig, SnapshotBot, JVMBot\n'), ((2042, 2050), 'automon.integrations.elasticsearch.config.JVMBot', 'JVMBot', ([], {}), '()\n', 
(2048, 2050), False, 'from automon.integrations.elasticsearch.config import ElasticsearchConfig, SnapshotBot, JVMBot\n'), ((1419, 1428), 'automon.integrations.elasticsearch.cleanup.Cleanup', 'Cleanup', ([], {}), '()\n', (1426, 1428), False, 'from automon.integrations.elasticsearch.cleanup import Cleanup\n'), ((1473, 1482), 'automon.integrations.elasticsearch.cleanup.Cleanup', 'Cleanup', ([], {}), '()\n', (1480, 1482), False, 'from automon.integrations.elasticsearch.cleanup import Cleanup\n'), ((1611, 1620), 'automon.integrations.elasticsearch.cleanup.Cleanup', 'Cleanup', ([], {}), '()\n', (1618, 1620), False, 'from automon.integrations.elasticsearch.cleanup import Cleanup\n'), ((1665, 1674), 'automon.integrations.elasticsearch.cleanup.Cleanup', 'Cleanup', ([], {}), '()\n', (1672, 1674), False, 'from automon.integrations.elasticsearch.cleanup import Cleanup\n')] |
import pytest
from .validators.pre import prevalidators
from .validators.post import postvalidators
validators = prevalidators + postvalidators
@pytest.mark.parametrize('validator', validators)
def test_valid(validator):
data = {
'name': 'Oleg',
'mail': '<EMAIL>',
'count': 20,
}
v = validator(data=data)
assert v.is_valid() is True
assert v.cleaned_data == data
@pytest.mark.parametrize('validator', validators)
def test_no_field(validator):
    """Omitting the required 'count' key must fail validation with errors."""
    incomplete = {
        'name': 'Oleg',
        'mail': '<EMAIL>',
    }
    form = validator(data=incomplete)
    assert form.is_valid() is False
    assert form.errors
@pytest.mark.parametrize('validator', validators)
def test_invalid_int(validator):
    """A non-numeric 'count' value must fail validation with errors."""
    payload = {
        'name': 'Oleg',
        'mail': '<EMAIL>',
        'count': 'lol',
    }
    form = validator(data=payload)
    assert form.is_valid() is False
    assert form.errors
@pytest.mark.parametrize('validator', prevalidators)
def test_types_converting(validator):
    """Pre-validators coerce a numeric string 'count' to int when request=True."""
    payload = {
        'name': 'Oleg',
        'mail': '<EMAIL>',
        'count': '10',
    }
    form = validator(request=True, data=payload)
    assert form.is_valid() is True
    assert not form.errors
    assert 'count' in form.cleaned_data
    assert form.cleaned_data['count'] == 10
@pytest.mark.parametrize('validator', prevalidators)
def test_explicit_keys(validator):
    """Keys outside the declared schema are dropped from cleaned_data."""
    payload = {
        'name': 'Oleg',
        'mail': '<EMAIL>',
        'count': 10,
        'junk': 'test',
    }
    form = validator(request=True, data=payload)
    assert form.is_valid() is True
    assert not form.errors
    assert 'junk' not in form.cleaned_data
| [
"pytest.mark.parametrize"
] | [((150, 198), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""validator"""', 'validators'], {}), "('validator', validators)\n", (173, 198), False, 'import pytest\n'), ((415, 463), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""validator"""', 'validators'], {}), "('validator', validators)\n", (438, 463), False, 'import pytest\n'), ((649, 697), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""validator"""', 'validators'], {}), "('validator', validators)\n", (672, 697), False, 'import pytest\n'), ((910, 961), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""validator"""', 'prevalidators'], {}), "('validator', prevalidators)\n", (933, 961), False, 'import pytest\n'), ((1273, 1324), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""validator"""', 'prevalidators'], {}), "('validator', prevalidators)\n", (1296, 1324), False, 'import pytest\n')] |
from typer.testing import CliRunner
from indic_transliteration.sanscript_cli import app
# Shared CLI runner and one round-trip fixture: "rAmAyaNa" in Harvard-Kyoto
# should transliterate to "rāmāyaṇa" in IAST.
runner = CliRunner()
test_input = "rAmAyaNa"
expected_output = "rāmāyaṇa"
def test_argument_input():
    """Text passed as a positional CLI argument is transliterated to stdout."""
    args = ["--from", "hk", "--to", "iast", test_input]
    result = runner.invoke(app, args)
    assert result.exit_code == 0
    assert expected_output in result.stdout
def test_stdin_input():
    """An input file of '-' makes the CLI read the text from stdin."""
    args = ["--from", "hk", "--to", "iast", "--input-file", "-"]
    result = runner.invoke(app, args, input=test_input)
    assert result.exit_code == 0
    assert expected_output in result.stdout
def test_file_input(tmp_path):
    """The text to transliterate can be read from a file on disk."""
    source = tmp_path / "test_input_file.txt"
    source.write_text(test_input)
    args = ["--from", "hk", "--to", "iast", "--input-file", source]
    result = runner.invoke(app, args)
    assert result.exit_code == 0
    assert expected_output in result.stdout
def test_file_output(tmp_path):
    """--output-file writes the result to disk and reports the location."""
    test_output_file = tmp_path / "test_file_output.txt"
    args = ["--from", "hk", "--to", "iast", "--output-file", test_output_file, test_input]
    result = runner.invoke(app, args)
    assert result.exit_code == 0
    assert f"Output written to: {test_output_file}" in result.stdout
    assert test_output_file.read_text() == expected_output
| [
"typer.testing.CliRunner"
] | [((99, 110), 'typer.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (108, 110), False, 'from typer.testing import CliRunner\n')] |
import trio
from functools import partial
from .base_runner import BaseRunner
from .async_tools import raise_return, AsyncExecution
class TrioRunner(BaseRunner):
    """Runner for coroutines with :py:mod:`trio`"""
    flavour = trio
    def __init__(self):
        self._nursery = None
        super().__init__()
    def register_payload(self, payload):
        # Wrap the payload so its return value is surfaced via raise_return
        # instead of being silently discarded by the nursery.
        super().register_payload(partial(raise_return, payload))
    def run_payload(self, payload):
        """Run *payload* to completion and return (or raise) its result."""
        execution = AsyncExecution(payload)
        super().register_payload(execution.coroutine)
        return execution.wait()
    def _run(self):
        # Blocks until _await_all returns, i.e. until self.running is cleared.
        return trio.run(self._await_all)
    async def _await_all(self):
        """Async component of _run"""
        delay = 0.0
        # we run a top-level nursery that automatically reaps/cancels for us
        async with trio.open_nursery() as nursery:
            while self.running.is_set():
                await self._start_payloads(nursery=nursery)
                await trio.sleep(delay)
                # back off gradually (max 1s) between polls for new payloads
                delay = min(delay + 0.1, 1.0)
            # cancel the scope to cancel all payloads
            nursery.cancel_scope.cancel()
    async def _start_payloads(self, nursery):
        """Start all queued payloads"""
        with self._lock:
            for coroutine in self._payloads:
                nursery.start_soon(coroutine)
            self._payloads.clear()
            # checkpoint so freshly started tasks get scheduled promptly
            # NOTE(review): this awaits while still holding self._lock —
            # confirm the lock is only contended from short, non-async paths.
            await trio.sleep(0)
| [
"trio.open_nursery",
"functools.partial",
"trio.run",
"trio.sleep"
] | [((629, 654), 'trio.run', 'trio.run', (['self._await_all'], {}), '(self._await_all)\n', (637, 654), False, 'import trio\n'), ((394, 424), 'functools.partial', 'partial', (['raise_return', 'payload'], {}), '(raise_return, payload)\n', (401, 424), False, 'from functools import partial\n'), ((842, 861), 'trio.open_nursery', 'trio.open_nursery', ([], {}), '()\n', (859, 861), False, 'import trio\n'), ((1409, 1422), 'trio.sleep', 'trio.sleep', (['(0)'], {}), '(0)\n', (1419, 1422), False, 'import trio\n'), ((997, 1014), 'trio.sleep', 'trio.sleep', (['delay'], {}), '(delay)\n', (1007, 1014), False, 'import trio\n')] |
from mission.constants.missions import Gate, Path
from conf.vehicle import is_mainsub
# Depth at which the acoustic pinger sits (presumably metres — confirm
# against the mission framework's conventions).
HYDROPHONES_PINGER_DEPTH = 3.0
# Shallowest depth allowed while the vehicle must stay submerged.
NONSURFACE_MIN_DEPTH = 0.6
# gate = Gate(
#     depth=1.0,
#     initial_approach_target_percent_of_screen=.45,
#     gate_width_threshold=0.4,
#     pre_spin_charge_dist=16 if is_mainsub else 12,
#     spin_charge_dist=16 if is_mainsub else 12,
#     post_spin_charge_dist=16 if is_mainsub else 12
# )
# Path-following mission parameters. The mainsub gets larger search
# distances and back-up values than the minisub.
path = Path(
    depth=1.0,
    search_forward=6 if is_mainsub else 2,
    search_stride = 10 if is_mainsub else 8,
    search_right_first=True,
    search_speed=0.1,
    post_dist=2.5,
    failure_back_up_dist=0.5 if is_mainsub else 0.1,
    failure_back_up_speed=0.2 if is_mainsub else 0.1,
)
#dice = Dice(
# depth=3.3,
# max_depth=4,
# search_forward=3,
# search_stride=8,
# search_speed=0.1,
# min_dot_radius=0.03,
# ram_dist=1.0,
# ram_speed=0.1,
# rammed_back_up_timeout=20,
# lost_sight_back_up_timeout=5,
# search_default_zero_timeout=60,
#)
#
#highway = Highway(
# high_depth=1.0,
# low_depth=1.2,
# dist=6 if is_mainsub else 2,
# speed=0.4 if is_mainsub else 0.2,
#)
#
#track = Track(
# depth=1.6,
# slow_down_dist=5,
# max_speed=0.3 if is_mainsub else 0.2,
# min_speed=0.1,
# vision_frame_period=0.5,
#)
#
#roulette = Roulette(
# depth_search=1.0,
# depth_realign=2.5,
# depth_drop=3.0,
# heading_offset=30,
#)
#
#cash_in = CashIn(
# approach_funnel_depth=0.5,
# drop_approach_dist=0.2,
# # (right, left)
# drop_dvl_forward_correct_dist=(0.1, -0.13),
# drop_heading_correct=(0, -7),
# pick_up_both_depth=1.0,
# pick_up_search_depth_1=2.0,
# pick_up_search_depth_2=2.25,
# pick_up_search_depth_3=2.5,
# pick_up_start_follow_depth=3.2,
# attempt_surface_depth=-1,
# attempt_funnel_depth=0,
#)
| [
"mission.constants.missions.Path"
] | [((432, 695), 'mission.constants.missions.Path', 'Path', ([], {'depth': '(1.0)', 'search_forward': '(6 if is_mainsub else 2)', 'search_stride': '(10 if is_mainsub else 8)', 'search_right_first': '(True)', 'search_speed': '(0.1)', 'post_dist': '(2.5)', 'failure_back_up_dist': '(0.5 if is_mainsub else 0.1)', 'failure_back_up_speed': '(0.2 if is_mainsub else 0.1)'}), '(depth=1.0, search_forward=6 if is_mainsub else 2, search_stride=10 if\n is_mainsub else 8, search_right_first=True, search_speed=0.1, post_dist\n =2.5, failure_back_up_dist=0.5 if is_mainsub else 0.1,\n failure_back_up_speed=0.2 if is_mainsub else 0.1)\n', (436, 695), False, 'from mission.constants.missions import Gate, Path\n')] |
from os import path
from setuptools import find_packages, setup
def _read(filename):
    """Read a file that lives next to this setup.py as UTF-8 text."""
    here = path.abspath(path.dirname(__file__))
    with open(path.join(here, filename), encoding="utf-8") as handle:
        return handle.read()


long_description = _read("README.md")
license_ = _read("LICENSE")

setup(
    name="hourly_price_prediction",
    packages=find_packages(),
    version="0.1.0",
    description="Naive Algo-Trading application that uses a Data Science Model for predicting the hourly close price of an asset and then buying or selling that asset based on the prediction.",
    author="<NAME>",
    license=license_,
    long_description=long_description,
    long_description_content_type="text/markdown",
    install_requires=[
        "pandas>=1.3.1",
        "scikit-learn>=0.24.2",
        "cbpro>=1.1.4",
        "boto3>=1.18.12",
    ],
)
| [
"os.path.dirname",
"setuptools.find_packages",
"os.path.join"
] | [((96, 118), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (108, 118), False, 'from os import path\n'), ((130, 168), 'os.path.join', 'path.join', (['this_directory', '"""README.md"""'], {}), "(this_directory, 'README.md')\n", (139, 168), False, 'from os import path\n'), ((237, 273), 'os.path.join', 'path.join', (['this_directory', '"""LICENSE"""'], {}), "(this_directory, 'LICENSE')\n", (246, 273), False, 'from os import path\n'), ((380, 395), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (393, 395), False, 'from setuptools import find_packages, setup\n')] |
import copy
import warnings
from Constants import TARGET_REGISTER, RAND_VALUE, FOCUS_REGISTER, ADDRESS, OPERAND_TYPE, OPERANDS, ISSUE_SLOT
from util.Processor import Processor
class Instruction:
    """One parsed assembly instruction plus its feature set and operand
    attributes; renders itself back to assembly text on demand.

    All parsing and rendering is delegated to the Processor helper.
    """
    def __init__(self, assembly: str, mutable=True):
        """Parse *assembly* and initialise feature/operand bookkeeping.

        :param assembly: raw assembly text of a single instruction.
        :param mutable: when False, _setInstructionAssembly() is a no-op and
            the rendered mnemonic stays exactly as parsed.
        """
        # set default enabled features
        self.isRandValueRandomImmediate = False
        self.globalMandatoryFeatures = {}
        self.mutable = mutable
        self._assembly = assembly
        self.parsestring = Processor().parseInstruction(assembly)
        self.mandatoryEnabledFeatures = Processor().getMandatoryFeatures(assembly)
        self.inst_name = self.parsestring[0]
        self.features = self.getFeatures()
        self._enabledfeatures = {}
        # self.reset_features()
        # Operands
        self._operandAttr = {} # TARGET_REGISTER, FOCUS_REGISTER, RAND_VALUE
        # parse assembly for return
        self._instruction = self.parsestring[0]
        self._operandStrings = self.parsestring[1]
        self._originalOperandString = copy.deepcopy(self.parsestring[1])
        self.interleavingTargetRegister = None
    def getFeatures(self):
        """Return the available features for this instruction, with mandatory
        features pinned to their single allowed value; None if unknown."""
        instructionName = None
        if len(self.parsestring) > 2:
            instructionName = self.inst_name
        try:
            features = Processor().getAvailableInstructionFeatures(instructionName)
            # overwrite mandatory features
            for feature in self.mandatoryEnabledFeatures:
                features[feature] = [Processor().getFeatureValue(feature, self.mandatoryEnabledFeatures[feature])]
            return features
        except KeyError:
            return None
    def get_operand_attr(self):
        # Mapping of operand kinds (e.g. TARGET_REGISTER) to their values.
        return self._operandAttr
    def getEnabledFeatures(self):
        # Currently enabled feature -> assembly feature string mapping.
        return self._enabledfeatures
    def setFeatures(self, features):
        """
        Reset all features and set them new.
        :param features: to set of the instruction.
        :return:
        """
        self._enabledfeatures = {}
        availableFeatures = Processor().getAvailableInstructionFeatures(self.inst_name)
        # self._enabledfeatures = features
        # Keep only features (and values) the instruction actually supports.
        for key in features:
            if key in availableFeatures:
                assemblyFeatureString = features[key]
                if assemblyFeatureString in availableFeatures[key]:
                    self._enabledfeatures[key] = assemblyFeatureString
        self._setInstructionAssembly()
    def getOperandAssembly(self): # in processor
        """Render only the operand portion of the instruction."""
        isPragma = Processor().isPragma(self.inst_name)
        # v = Processor().getOperandAssembly(self.parsestring[1], self._operandAttr, self.interleavingTargetRegister, isPragma=isPragma)
        return Processor().getOperandAssembly(self.parsestring[1], self._operandAttr, self.interleavingTargetRegister, isPragma=isPragma, isRandValueRandomImmediate=self.isRandValueRandomImmediate)
    def __str__(self) -> str:
        # Re-render the mnemonic first so enabled features are reflected.
        self._setInstructionAssembly()
        return self._instruction + " " + self.getOperandAssembly()
    def string(self) -> str:
        """Alias for str(self)."""
        return self.__str__()
    def getRawString(self):
        """Return the instruction as originally parsed (no feature rewrites)."""
        return self.inst_name + " " + self._originalOperandString
    def getName(self) -> str:
        """Return the instruction mnemonic."""
        return self.inst_name
    def getAssembly(self, enabledFeatures, globalMandatoryFeatures={}):
        """Convenience: set features, set global mandatory features, render.

        NOTE(review): mutable default argument; the dict is stored via
        setGlobalMandatoryFeatures(), so all default-call instances share one
        object — confirm no call site mutates it afterwards.
        """
        self.setFeatures(enabledFeatures)
        self.setGlobalMandatoryFeatures(globalMandatoryFeatures)
        return self.string()
    def setEnabledFeatures(self, key: int, value: int):
        """Enable a single feature value (no validation against self.features)."""
        self._enabledfeatures[key] = value
        #if value in self.features[key]:
        #    self._enabledfeatures[key] = value
        #else:
        #    self._enabledfeatures[key] = None
        self._setInstructionAssembly()
    def resetFeatures(self):
        """Reset every feature to its first (default) allowed value."""
        if self.features is None:
            return
        for key in self.features:
            self._enabledfeatures[key] = self.features[key][0]
        self._setInstructionAssembly()
    def _setInstructionAssembly(self):
        """Re-render the mnemonic from the enabled features (mutable only)."""
        if self.mutable:
            self.overrideMandatoryFeatures()
            self._instruction = Processor().getInstructionAssemblyString(self.parsestring[0],
                                                                         self._enabledfeatures)
            # Substitute a concrete branch index if one has been assigned.
            if OPERANDS.BRANCH_INDEX.value in self._instruction:
                if OPERANDS.BRANCH_INDEX.value in self._operandAttr:
                    self._instruction = self._instruction.replace(OPERANDS.BRANCH_INDEX.value, str(self._operandAttr[OPERANDS.BRANCH_INDEX.value]))
            # NOTE(review): leftover debug assignment below; it has no effect.
            n=3
    def overrideMandatoryFeatures(self):
        """Force per-instruction and global mandatory features into the
        currently enabled feature set."""
        for feature in self.mandatoryEnabledFeatures:
            self._enabledfeatures[feature] = Processor().getFeatureValue(feature, self.mandatoryEnabledFeatures[feature])
        for key in self.globalMandatoryFeatures:
            if key in self._enabledfeatures and key in self.mandatoryEnabledFeatures:
                self._enabledfeatures[key] = self.globalMandatoryFeatures[key]
    def setGlobalMandatoryFeatures(self, globalMandatoryFeature):
        self.globalMandatoryFeatures = globalMandatoryFeature
    def __eq__(self, other):
        # Equality is defined on the rendered assembly text.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable — confirm Instruction is never used in sets/dict keys.
        if type(self) != type(other):
            return False
        return self.__str__() == other.__str__()
    def setOperands(self, operands):
        self._operandAttr = operands
    def setOverrideTargetOperand(self, overRidingTargetOperand):
        # Target register override used when interleaving instructions.
        self.interleavingTargetRegister = overRidingTargetOperand
    def enableRandValueRandomImmediate(self):
        self.isRandValueRandomImmediate = True
| [
"util.Processor.Processor",
"copy.deepcopy"
] | [((1025, 1059), 'copy.deepcopy', 'copy.deepcopy', (['self.parsestring[1]'], {}), '(self.parsestring[1])\n', (1038, 1059), False, 'import copy\n'), ((473, 484), 'util.Processor.Processor', 'Processor', ([], {}), '()\n', (482, 484), False, 'from util.Processor import Processor\n'), ((554, 565), 'util.Processor.Processor', 'Processor', ([], {}), '()\n', (563, 565), False, 'from util.Processor import Processor\n'), ((2017, 2028), 'util.Processor.Processor', 'Processor', ([], {}), '()\n', (2026, 2028), False, 'from util.Processor import Processor\n'), ((2496, 2507), 'util.Processor.Processor', 'Processor', ([], {}), '()\n', (2505, 2507), False, 'from util.Processor import Processor\n'), ((2685, 2696), 'util.Processor.Processor', 'Processor', ([], {}), '()\n', (2694, 2696), False, 'from util.Processor import Processor\n'), ((1286, 1297), 'util.Processor.Processor', 'Processor', ([], {}), '()\n', (1295, 1297), False, 'from util.Processor import Processor\n'), ((4082, 4093), 'util.Processor.Processor', 'Processor', ([], {}), '()\n', (4091, 4093), False, 'from util.Processor import Processor\n'), ((4672, 4683), 'util.Processor.Processor', 'Processor', ([], {}), '()\n', (4681, 4683), False, 'from util.Processor import Processor\n'), ((1485, 1496), 'util.Processor.Processor', 'Processor', ([], {}), '()\n', (1494, 1496), False, 'from util.Processor import Processor\n')] |
import carla
import random
import time
from tqdm import tqdm
# Capture RGB and semantic-segmentation frames from elevated, pitched-down
# cameras placed at every spawn point of every available CARLA map.
client = carla.Client('localhost', 2000)
client.set_timeout(2.0)
for mid, map_name in enumerate(client.get_available_maps()):
    world = client.load_world(map_name)
    blueprint_library = world.get_blueprint_library()
    print('load map', map_name)
    bp_rgb = blueprint_library.find('sensor.camera.rgb')
    bp_rgb.set_attribute('sensor_tick', '0.1')
    bp_rgb.set_attribute('image_size_x', '1024')
    bp_rgb.set_attribute('image_size_y', '1024')
    bp_seg = blueprint_library.find('sensor.camera.semantic_segmentation')
    bp_seg.set_attribute('sensor_tick', '0.1')
    bp_seg.set_attribute('image_size_x', '1024')
    bp_seg.set_attribute('image_size_y', '1024')
    cc_rgb = carla.ColorConverter.Raw
    cc_seg = carla.ColorConverter.CityScapesPalette
    actors = []
    for i, transform in tqdm(enumerate(world.get_map().get_spawn_points())):
        transform.location.z += 3.0
        transform.rotation.pitch = -45.0
        camera_rgb = world.spawn_actor(bp_rgb, transform)
        actors.append(camera_rgb)
        # FIX: bind mid/i as default arguments. A plain closure reads them
        # late, so cameras from earlier iterations (still listening while the
        # loop advances) would save frames under the *current* loop indices.
        camera_rgb.listen(lambda image, mid=mid, i=i: image.save_to_disk('_out/%02d_%05d_rgb_%06d.png' % (mid, i, image.frame), cc_rgb))
        time.sleep(0.15)
    for actor in actors:
        actor.destroy()
    # Collect actors in a list and destroy through it; calling destroy()
    # directly on the handle may raise. (translated from the original note)
    actors = []
    # NOTE(review): this segmentation capture sits *outside* the spawn-point
    # loop, so only the last spawn point's transform and index are used —
    # confirm whether it was meant to run once per spawn point like the RGB
    # capture above.
    camera_seg = world.spawn_actor(bp_seg, transform)
    actors.append(camera_seg)
    camera_seg.listen(lambda image, mid=mid, i=i: image.save_to_disk('_out/%02d_%05d_seg_%06d.png' % (mid, i, image.frame), cc_seg))
    time.sleep(0.15)
    for actor in actors:
        actor.destroy()
    actors = []
    time.sleep(1)
    print('all %d point done.' % len(world.get_map().get_spawn_points()))
"carla.Client",
"time.sleep"
] | [((71, 102), 'carla.Client', 'carla.Client', (['"""localhost"""', '(2000)'], {}), "('localhost', 2000)\n", (83, 102), False, 'import carla\n'), ((1688, 1701), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1698, 1701), False, 'import time\n'), ((1223, 1239), 'time.sleep', 'time.sleep', (['(0.15)'], {}), '(0.15)\n', (1233, 1239), False, 'import time\n'), ((1585, 1601), 'time.sleep', 'time.sleep', (['(0.15)'], {}), '(0.15)\n', (1595, 1601), False, 'import time\n')] |
"""
Displays FISH data, raw and deconvolved, with spots detected using starFISH
"""
from skimage.io import imread
import numpy as np
from napari import Viewer, gui_qt
# Load the raw and deconvolved image stacks plus the detected spot
# coordinates (one spot per CSV row; columns match the image axes —
# presumably z, y, x order, confirm against the CSV).
raw = imread('data-njs/smFISH/raw.tif')
deconvolved = imread('data-njs/smFISH/deconvolved.tif')
spots = np.loadtxt('data-njs/smFISH/spots.csv', delimiter=',')
print(raw.shape)
with gui_qt():
    # create an empty viewer
    viewer = Viewer()
    # add the raw images
    raw_layer = viewer.add_image(raw, name='images', colormap='gray', contrast_limits=(140.0, 1300.0))
    decon_layer = viewer.add_image(deconvolved, name='deconvolved', colormap='gray', contrast_limits=(0.0, 0.2))
    decon_layer.visible = False
    spots_layer = viewer.add_points(spots, face_color='red',
                                    edge_color='red', symbol='ring', size=8,
                                    n_dimensional=True, name='spots')
    spots_layer.opacity = 0.5
    @viewer.bind_key('s')
    def swap(viewer):
        """Swaps dims
        """
        # rotate the displayed dimension order by one axis
        viewer.dims.order = np.roll(viewer.dims.order, 1)
| [
"napari.Viewer",
"numpy.roll",
"napari.gui_qt",
"skimage.io.imread",
"numpy.loadtxt"
] | [((175, 208), 'skimage.io.imread', 'imread', (['"""data-njs/smFISH/raw.tif"""'], {}), "('data-njs/smFISH/raw.tif')\n", (181, 208), False, 'from skimage.io import imread\n'), ((223, 264), 'skimage.io.imread', 'imread', (['"""data-njs/smFISH/deconvolved.tif"""'], {}), "('data-njs/smFISH/deconvolved.tif')\n", (229, 264), False, 'from skimage.io import imread\n'), ((273, 327), 'numpy.loadtxt', 'np.loadtxt', (['"""data-njs/smFISH/spots.csv"""'], {'delimiter': '""","""'}), "('data-njs/smFISH/spots.csv', delimiter=',')\n", (283, 327), True, 'import numpy as np\n'), ((352, 360), 'napari.gui_qt', 'gui_qt', ([], {}), '()\n', (358, 360), False, 'from napari import Viewer, gui_qt\n'), ((404, 412), 'napari.Viewer', 'Viewer', ([], {}), '()\n', (410, 412), False, 'from napari import Viewer, gui_qt\n'), ((1041, 1070), 'numpy.roll', 'np.roll', (['viewer.dims.order', '(1)'], {}), '(viewer.dims.order, 1)\n', (1048, 1070), True, 'import numpy as np\n')] |
"""Load 'allCountries.txt' as a list of lines and persist it in a shelf."""
import shelve

# Use context managers so both the shelf and the text file are always
# closed — the original leaked the file handle and only closed the shelf.
with shelve.open('mydata') as shelfFile:
    with open('allCountries.txt') as file:
        print('Dividiendo el fichero en un array de líneas')
        lineas = file.readlines()
        print('OK')
    shelfFile['arraylineas'] = lineas
| [
"shelve.open"
] | [((27, 48), 'shelve.open', 'shelve.open', (['"""mydata"""'], {}), "('mydata')\n", (38, 48), False, 'import shelve\n')] |
import b


def get_str():
    """Return b.get_hoge()'s value wrapped between '123_' and '_456'."""
    # e.g. 123_hoge_456
    return "123_{}_456".format(b.get_hoge())


if __name__ == "__main__":
    print(get_str())
| [
"b.get_hoge"
] | [((44, 56), 'b.get_hoge', 'b.get_hoge', ([], {}), '()\n', (54, 56), False, 'import b\n')] |
# The MIT License (MIT)
#
# Copyright (c) 2020-2022 <NAME>
#
# Use of this source code is governed by The MIT License (MIT)
# that can be found in the LICENSE.txt file.
"""
APIs for handling processes
"""
import subprocess
from shlibvischeck.common.error import error
__all__ = ['run', 'is_runnable']
def run(cmd, fatal=True):
    """Run *cmd* and return ``(returncode, stdout, stderr)`` as decoded text.

    *cmd* may be a pre-split argv list or a single space-separated string.
    When *fatal* is true, a non-zero exit status is reported via error().
    """
    argv = cmd.split(' ') if isinstance(cmd, str) else cmd
    completed = subprocess.run(argv, stdin=None,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             check=False)
    out = completed.stdout.decode()
    err = completed.stderr.decode()
    if fatal and completed.returncode != 0:
        error(f"'{argv}' failed:\n{out}{err}")
    return completed.returncode, out, err
def is_runnable(name):
    """Return True when *name* can be executed (probed via `name --help`)."""
    returncode, _out, _err = run([name, "--help"], fatal=False)
    return returncode == 0
| [
"shlibvischeck.common.error.error",
"subprocess.Popen"
] | [((449, 535), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdin': 'None', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess\n .PIPE)\n', (465, 535), False, 'import subprocess\n'), ((672, 712), 'shlibvischeck.common.error.error', 'error', (['f"""\'{cmd}\' failed:\n{out}{err}"""'], {}), '(f"""\'{cmd}\' failed:\n{out}{err}""")\n', (677, 712), False, 'from shlibvischeck.common.error import error\n')] |
from astra import models
import redis
# Module-wide Redis connection shared by every astra model defined here;
# decode_responses=True so values come back as str rather than bytes.
db = redis.StrictRedis(host='127.0.0.1', decode_responses=True)
class SiteColorModel(models.Model):
    """astra model with a single 'color' string field backed by Redis."""
    color = models.CharField()
    def get_db(self):
        """Return the Redis connection astra should use for this model."""
        return db
| [
"redis.StrictRedis",
"astra.models.CharField"
] | [((45, 103), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': '"""127.0.0.1"""', 'decode_responses': '(True)'}), "(host='127.0.0.1', decode_responses=True)\n", (62, 103), False, 'import redis\n'), ((154, 172), 'astra.models.CharField', 'models.CharField', ([], {}), '()\n', (170, 172), False, 'from astra import models\n')] |
from recip.validate.types.Block import Block
from recip.validate.types.Tx import Tx
from recip.validate import ValidatorType
def getInstance(vType):
    """Factory: build the validator matching *vType*.

    Returns a Block validator for ValidatorType.BLOCK, a Tx validator for
    ValidatorType.TX, and (implicitly) None for any other value.
    """
    if vType == ValidatorType.BLOCK:
        return Block()
    if vType == ValidatorType.TX:
        return Tx()
"recip.validate.types.Block.Block",
"recip.validate.types.Tx.Tx"
] | [((237, 244), 'recip.validate.types.Block.Block', 'Block', ([], {}), '()\n', (242, 244), False, 'from recip.validate.types.Block import Block\n'), ((296, 300), 'recip.validate.types.Tx.Tx', 'Tx', ([], {}), '()\n', (298, 300), False, 'from recip.validate.types.Tx import Tx\n')] |
from abc import ABCMeta, abstractmethod
import json
import logging
import copy
import boto3
import botocore
from botocore.exceptions import ClientError
from endgame.shared.response_message import ResponseMessage
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class ResourceType(object):
    """Base class for one AWS resource whose resource-based policy (RBP)
    can be backdoored with an extra principal and later restored."""
    # NOTE(review): '__meta_class__' is not the metaclass hook (Py2 used
    # '__metaclass__'; Py3 uses 'class X(metaclass=ABCMeta)'), so the
    # @abstractmethod decorators below are not actually enforced — confirm.
    __meta_class__ = ABCMeta
    def __init__(
            self,
            name: str,
            resource_type: str,
            service: str,
            region: str,
            client: boto3.Session.client,
            current_account_id: str,
            override_action: str = None,
            include_resource_block: bool = True,
            override_resource_block: str = None,
            override_account_id_instead_of_principal: bool = False
    ):
        self.name = name
        self.resource_type = resource_type
        self.client = client
        self.current_account_id = current_account_id
        self.service = service
        self.region = region
        self.include_resource_block = include_resource_block # Override for IAM
        self.override_action = override_action # Override for IAM
        self.override_resource_block = override_resource_block # Override for EFS
        self.override_account_id_instead_of_principal = override_account_id_instead_of_principal # Override for logs, sns, sqs, and lambda
        # Fetch the current resource-based policy from the live resource.
        self.policy_document = self._get_rbp().policy_document
        # Store an original copy of the policy so we can compare it later.
        self.original_policy = copy.deepcopy(json.loads(json.dumps(self.policy_document.original_policy)))
    def __str__(self):
        # JSON round-trip normalises the policy document for display.
        return '%s' % (json.dumps(json.loads(self.policy_document.__str__())))
    @abstractmethod
    def _get_rbp(self) -> ResponseGetRbp:
        """Fetch the current resource-based policy (subclass-specific API call)."""
        raise NotImplementedError("Must override _get_rbp")
    @property
    @abstractmethod
    def arn(self) -> str:
        """ARN of the resource this instance wraps."""
        raise NotImplementedError("Must override arn")
    @abstractmethod
    def set_rbp(self, evil_policy: dict) -> ResponseMessage:
        """Write *evil_policy* back to the resource (subclass-specific API call)."""
        raise NotImplementedError("Must override set_rbp")
    def add_myself(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
        """Add your rogue principal to the AWS resource"""
        logger.debug(f"Adding {evil_principal} to {self.arn}")
        evil_policy = self.policy_document.policy_plus_evil_principal(
            victim_account_id=self.current_account_id,
            evil_principal=evil_principal,
            resource_arn=self.arn
        )
        if not dry_run:
            set_rbp_response = self.set_rbp(evil_policy=evil_policy)
            operation = "ADD_MYSELF"
            message = set_rbp_response.message
            success = set_rbp_response.success
        else:
            # new_policy = evil_policy
            operation = "DRY_RUN_ADD_MYSELF"
            message = "DRY_RUN_ADD_MYSELF"
            # In dry-run mode, only verify the policy is still readable.
            try:
                tmp = self._get_rbp()
                success = tmp.success
            except botocore.exceptions.ClientError as error:
                message = str(error)
                success = False
        response_message = ResponseMessage(message=message, operation=operation, success=success,
                                           evil_principal=evil_principal, victim_resource_arn=self.arn,
                                           original_policy=self.original_policy, updated_policy=evil_policy,
                                           resource_type=self.resource_type, resource_name=self.name,
                                           service=self.service)
        return response_message
    def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
        """Remove all traces"""
        logger.debug(f"Removing {evil_principal} from {self.arn}")
        policy_stripped = self.policy_document.policy_minus_evil_principal(
            victim_account_id=self.current_account_id,
            evil_principal=evil_principal,
            resource_arn=self.arn
        )
        if not dry_run:
            operation = "UNDO"
            set_rbp_response = self.set_rbp(evil_policy=policy_stripped)
            message = set_rbp_response.message
            success = set_rbp_response.success
        else:
            operation = "DRY_RUN_UNDO"
            message = "DRY_RUN_UNDO"
            success = True
        response_message = ResponseMessage(message=message, operation=operation, success=success,
                                           evil_principal=evil_principal, victim_resource_arn=self.arn,
                                           original_policy=self.original_policy, updated_policy=policy_stripped,
                                           resource_type=self.resource_type, resource_name=self.name,
                                           service=self.service)
        return response_message
class ResourceTypes(object):
    """Base class for listing every resource of one type in an account/region."""
    # NOTE(review): same '__meta_class__' mis-spelling as ResourceType — the
    # abstract property below is not enforced at instantiation time.
    __meta_class__ = ABCMeta
    def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
        self.client = client
        self.current_account_id = current_account_id
        self.region = region
    def __str__(self):
        # NOTE(review): 'resources' is annotated as a list of
        # ListResourcesResponse, yet '.arn' is accessed on it directly —
        # confirm subclasses actually return an object exposing '.arn'.
        return '%s' % (json.dumps(self.resources.arn))
    @property
    @abstractmethod
    def resources(self) -> [ListResourcesResponse]:
        """Responses describing every resource of this type (subclass-specific)."""
        raise NotImplementedError("Must override property 'resources'")
| [
"logging.getLogger",
"json.dumps",
"endgame.shared.response_message.ResponseMessage"
] | [((354, 381), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (371, 381), False, 'import logging\n'), ((3227, 3522), 'endgame.shared.response_message.ResponseMessage', 'ResponseMessage', ([], {'message': 'message', 'operation': 'operation', 'success': 'success', 'evil_principal': 'evil_principal', 'victim_resource_arn': 'self.arn', 'original_policy': 'self.original_policy', 'updated_policy': 'evil_policy', 'resource_type': 'self.resource_type', 'resource_name': 'self.name', 'service': 'self.service'}), '(message=message, operation=operation, success=success,\n evil_principal=evil_principal, victim_resource_arn=self.arn,\n original_policy=self.original_policy, updated_policy=evil_policy,\n resource_type=self.resource_type, resource_name=self.name, service=self\n .service)\n', (3242, 3522), False, 'from endgame.shared.response_message import ResponseMessage\n'), ((4478, 4777), 'endgame.shared.response_message.ResponseMessage', 'ResponseMessage', ([], {'message': 'message', 'operation': 'operation', 'success': 'success', 'evil_principal': 'evil_principal', 'victim_resource_arn': 'self.arn', 'original_policy': 'self.original_policy', 'updated_policy': 'policy_stripped', 'resource_type': 'self.resource_type', 'resource_name': 'self.name', 'service': 'self.service'}), '(message=message, operation=operation, success=success,\n evil_principal=evil_principal, victim_resource_arn=self.arn,\n original_policy=self.original_policy, updated_policy=policy_stripped,\n resource_type=self.resource_type, resource_name=self.name, service=self\n .service)\n', (4493, 4777), False, 'from endgame.shared.response_message import ResponseMessage\n'), ((5276, 5306), 'json.dumps', 'json.dumps', (['self.resources.arn'], {}), '(self.resources.arn)\n', (5286, 5306), False, 'import json\n'), ((1653, 1701), 'json.dumps', 'json.dumps', (['self.policy_document.original_policy'], {}), '(self.policy_document.original_policy)\n', (1663, 1701), False, 'import json\n')] |
""" Production Agroindustrial Tools """
# Django
from django.db import models
class AgroindustrialTools(models.Model):
    """Tool used in an agroindustrial production run.

    Each record belongs to one producer.ProductionAgroindustrial and is
    reachable from it via the 'agroindustrial_tools' reverse accessor.
    """
    production_agroindustrial = models.ForeignKey(
        "producer.ProductionAgroindustrial",
        related_name="agroindustrial_tools",
        # Deleting the production run removes its tool records as well.
        on_delete=models.CASCADE
    )
    name_tool = models.CharField(max_length=50)
    type_tool = models.CharField(max_length=30)
    number_tools = models.PositiveIntegerField()
| [
"django.db.models.PositiveIntegerField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((225, 347), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""producer.ProductionAgroindustrial"""'], {'related_name': '"""agroindustrial_tools"""', 'on_delete': 'models.CASCADE'}), "('producer.ProductionAgroindustrial', related_name=\n 'agroindustrial_tools', on_delete=models.CASCADE)\n", (242, 347), False, 'from django.db import models\n'), ((393, 424), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (409, 424), False, 'from django.db import models\n'), ((441, 472), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (457, 472), False, 'from django.db import models\n'), ((492, 521), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (519, 521), False, 'from django.db import models\n')] |
from django.shortcuts import render, HttpResponse
from home.models import Contact, Library, category
from datetime import datetime
from django.contrib import messages
# Create your views here.
def index(request):
    """Render the site home page."""
    return render(request, "index.html")
    #return HttpResponse("Home page")
def about(request):
    """Render the about page."""
    return render(request, "about.html")
    #return HttpResponse("about page")
def Books(request):
    """Render the books page."""
    return render(request, "books.html")
def Category(request):
    """Render the category listing with every Library entry."""
    entries = Library.objects.all()
    return render(request, "category.html", {"lis": entries})
def contact(request):
    """Show the contact form; on POST, store the message and flash success."""
    if request.method == "POST":
        # 'entry' rather than 'contact' so the local does not shadow the view.
        entry = Contact(
            name=request.POST.get("name"),
            email=request.POST.get("email"),
            phone=request.POST.get("phone"),
            date=datetime.today(),
        )
        entry.save()
        messages.success(request, 'Your message has been sent.')
    return render(request, "contact.html")
def signup(request):
    """Render the signup page."""
    return render(request, "signup.html")
"django.shortcuts.render",
"datetime.datetime.today",
"home.models.Library.objects.all",
"django.contrib.messages.success"
] | [((240, 269), 'django.shortcuts.render', 'render', (['request', '"""index.html"""'], {}), "(request, 'index.html')\n", (246, 269), False, 'from django.shortcuts import render, HttpResponse\n'), ((344, 373), 'django.shortcuts.render', 'render', (['request', '"""about.html"""'], {}), "(request, 'about.html')\n", (350, 373), False, 'from django.shortcuts import render, HttpResponse\n'), ((447, 476), 'django.shortcuts.render', 'render', (['request', '"""books.html"""'], {}), "(request, 'books.html')\n", (453, 476), False, 'from django.shortcuts import render, HttpResponse\n'), ((514, 535), 'home.models.Library.objects.all', 'Library.objects.all', ([], {}), '()\n', (533, 535), False, 'from home.models import Contact, Library, category\n'), ((554, 600), 'django.shortcuts.render', 'render', (['request', '"""category.html"""', "{'lis': lis}"], {}), "(request, 'category.html', {'lis': lis})\n", (560, 600), False, 'from django.shortcuts import render, HttpResponse\n'), ((1030, 1061), 'django.shortcuts.render', 'render', (['request', '"""contact.html"""'], {}), "(request, 'contact.html')\n", (1036, 1061), False, 'from django.shortcuts import render, HttpResponse\n'), ((1140, 1170), 'django.shortcuts.render', 'render', (['request', '"""signup.html"""'], {}), "(request, 'signup.html')\n", (1146, 1170), False, 'from django.shortcuts import render, HttpResponse\n'), ((959, 1015), 'django.contrib.messages.success', 'messages.success', (['request', '"""Your message has been sent."""'], {}), "(request, 'Your message has been sent.')\n", (975, 1015), False, 'from django.contrib import messages\n'), ((908, 924), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (922, 924), False, 'from datetime import datetime\n')] |
import operator
from plenum.test.helper import sdk_send_batches_of_random_and_check
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
def nodes_by_rank(txnPoolNodeSet):
return [t[1] for t in sorted([(node.rank, node)
for node in txnPoolNodeSet],
key=operator.itemgetter(0))]
def sdk_ensure_pool_functional(looper, nodes, sdk_wallet, sdk_pool,
num_reqs=10, num_batches=2):
sdk_send_batches_of_random_and_check(looper,
nodes,
sdk_pool,
sdk_wallet,
num_reqs,
num_batches)
ensure_all_nodes_have_same_data(looper, nodes)
def get_node_by_name(txnPoolNodeSet, name):
return next(node for node in txnPoolNodeSet if node.name == name)
def nodes_last_ordered_equal(*nodes):
if len(nodes) < 2:
raise BaseException('nodes_last_ordered_equal can compare less than 2 nodes')
seq_no = next(iter(nodes)).master_last_ordered_3PC[1]
assert all(seq_no == n.master_last_ordered_3PC[1] for n in nodes)
| [
"plenum.test.helper.sdk_send_batches_of_random_and_check",
"plenum.test.node_catchup.helper.ensure_all_nodes_have_same_data",
"operator.itemgetter"
] | [((509, 609), 'plenum.test.helper.sdk_send_batches_of_random_and_check', 'sdk_send_batches_of_random_and_check', (['looper', 'nodes', 'sdk_pool', 'sdk_wallet', 'num_reqs', 'num_batches'], {}), '(looper, nodes, sdk_pool, sdk_wallet,\n num_reqs, num_batches)\n', (545, 609), False, 'from plenum.test.helper import sdk_send_batches_of_random_and_check\n'), ((815, 861), 'plenum.test.node_catchup.helper.ensure_all_nodes_have_same_data', 'ensure_all_nodes_have_same_data', (['looper', 'nodes'], {}), '(looper, nodes)\n', (846, 861), False, 'from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data\n'), ((350, 372), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (369, 372), False, 'import operator\n')] |
import sys
import os
import time
import opt.example.SixPeaksEvaluationFunction as SixPeaksEvaluationFunction
import dist.DiscreteUniformDistribution as DiscreteUniformDistribution
import opt.DiscreteChangeOneNeighbor as DiscreteChangeOneNeighbor
import opt.ga.DiscreteChangeOneMutation as DiscreteChangeOneMutation
import opt.ga.SingleCrossOver as SingleCrossOver
import dist.DiscreteDependencyTree as DiscreteDependencyTree
import opt.GenericHillClimbingProblem as GenericHillClimbingProblem
import opt.ga.GeneticAlgorithmProblem as GeneticAlgorithmProblem
import opt.prob.GenericProbabilisticOptimizationProblem as GenericProbabilisticOptimizationProblem
import opt.ga.GenericGeneticAlgorithmProblem as GenericGeneticAlgorithmProblem
import opt.RandomizedHillClimbing as RandomizedHillClimbing
import shared.FixedIterationTrainer as FixedIterationTrainer
import opt.SimulatedAnnealing as SimulatedAnnealing
import opt.ga.StandardGeneticAlgorithm as StandardGeneticAlgorithm
import opt.prob.MIMIC as MIMIC
from array import array
N = 10
T = N / 5
fill = [2] * N
ranges = array('i', fill)
ef = SixPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 5)
fit.train()
print("RHC: ", ef.value(rhc.getOptimal()))
sa = SimulatedAnnealing(1E11, .95, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print("SA: ", ef.value(sa.getOptimal()))
ga = StandardGeneticAlgorithm(200, 100, 10, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print("GA: ", ef.value(ga.getOptimal()))
mimic = MIMIC(200, 20, pop)
fit = FixedIterationTrainer(mimic, 1000)
fit.train()
print("MIMIC: ", ef.value(mimic.getOptimal())) | [
"dist.DiscreteUniformDistribution",
"opt.ga.DiscreteChangeOneMutation",
"array.array",
"dist.DiscreteDependencyTree",
"opt.prob.MIMIC",
"opt.ga.SingleCrossOver",
"shared.FixedIterationTrainer",
"opt.DiscreteChangeOneNeighbor",
"opt.ga.GenericGeneticAlgorithmProblem",
"opt.example.SixPeaksEvaluatio... | [((1074, 1090), 'array.array', 'array', (['"""i"""', 'fill'], {}), "('i', fill)\n", (1079, 1090), False, 'from array import array\n'), ((1097, 1126), 'opt.example.SixPeaksEvaluationFunction', 'SixPeaksEvaluationFunction', (['T'], {}), '(T)\n', (1123, 1126), True, 'import opt.example.SixPeaksEvaluationFunction as SixPeaksEvaluationFunction\n'), ((1133, 1168), 'dist.DiscreteUniformDistribution', 'DiscreteUniformDistribution', (['ranges'], {}), '(ranges)\n', (1160, 1168), True, 'import dist.DiscreteUniformDistribution as DiscreteUniformDistribution\n'), ((1174, 1207), 'opt.DiscreteChangeOneNeighbor', 'DiscreteChangeOneNeighbor', (['ranges'], {}), '(ranges)\n', (1199, 1207), True, 'import opt.DiscreteChangeOneNeighbor as DiscreteChangeOneNeighbor\n'), ((1213, 1246), 'opt.ga.DiscreteChangeOneMutation', 'DiscreteChangeOneMutation', (['ranges'], {}), '(ranges)\n', (1238, 1246), True, 'import opt.ga.DiscreteChangeOneMutation as DiscreteChangeOneMutation\n'), ((1252, 1269), 'opt.ga.SingleCrossOver', 'SingleCrossOver', ([], {}), '()\n', (1267, 1269), True, 'import opt.ga.SingleCrossOver as SingleCrossOver\n'), ((1275, 1310), 'dist.DiscreteDependencyTree', 'DiscreteDependencyTree', (['(0.1)', 'ranges'], {}), '(0.1, ranges)\n', (1297, 1310), True, 'import dist.DiscreteDependencyTree as DiscreteDependencyTree\n'), ((1316, 1355), 'opt.GenericHillClimbingProblem', 'GenericHillClimbingProblem', (['ef', 'odd', 'nf'], {}), '(ef, odd, nf)\n', (1342, 1355), True, 'import opt.GenericHillClimbingProblem as GenericHillClimbingProblem\n'), ((1362, 1409), 'opt.ga.GenericGeneticAlgorithmProblem', 'GenericGeneticAlgorithmProblem', (['ef', 'odd', 'mf', 'cf'], {}), '(ef, odd, mf, cf)\n', (1392, 1409), True, 'import opt.ga.GenericGeneticAlgorithmProblem as GenericGeneticAlgorithmProblem\n'), ((1416, 1468), 'opt.prob.GenericProbabilisticOptimizationProblem', 'GenericProbabilisticOptimizationProblem', (['ef', 'odd', 'df'], {}), '(ef, odd, df)\n', (1455, 1468), 
True, 'import opt.prob.GenericProbabilisticOptimizationProblem as GenericProbabilisticOptimizationProblem\n'), ((1476, 1503), 'opt.RandomizedHillClimbing', 'RandomizedHillClimbing', (['hcp'], {}), '(hcp)\n', (1498, 1503), True, 'import opt.RandomizedHillClimbing as RandomizedHillClimbing\n'), ((1510, 1539), 'shared.FixedIterationTrainer', 'FixedIterationTrainer', (['rhc', '(5)'], {}), '(rhc, 5)\n', (1531, 1539), True, 'import shared.FixedIterationTrainer as FixedIterationTrainer\n'), ((1601, 1646), 'opt.SimulatedAnnealing', 'SimulatedAnnealing', (['(100000000000.0)', '(0.95)', 'hcp'], {}), '(100000000000.0, 0.95, hcp)\n', (1619, 1646), True, 'import opt.SimulatedAnnealing as SimulatedAnnealing\n'), ((1642, 1675), 'shared.FixedIterationTrainer', 'FixedIterationTrainer', (['sa', '(200000)'], {}), '(sa, 200000)\n', (1663, 1675), True, 'import shared.FixedIterationTrainer as FixedIterationTrainer\n'), ((1735, 1778), 'opt.ga.StandardGeneticAlgorithm', 'StandardGeneticAlgorithm', (['(200)', '(100)', '(10)', 'gap'], {}), '(200, 100, 10, gap)\n', (1759, 1778), True, 'import opt.ga.StandardGeneticAlgorithm as StandardGeneticAlgorithm\n'), ((1785, 1816), 'shared.FixedIterationTrainer', 'FixedIterationTrainer', (['ga', '(1000)'], {}), '(ga, 1000)\n', (1806, 1816), True, 'import shared.FixedIterationTrainer as FixedIterationTrainer\n'), ((1879, 1898), 'opt.prob.MIMIC', 'MIMIC', (['(200)', '(20)', 'pop'], {}), '(200, 20, pop)\n', (1884, 1898), True, 'import opt.prob.MIMIC as MIMIC\n'), ((1905, 1939), 'shared.FixedIterationTrainer', 'FixedIterationTrainer', (['mimic', '(1000)'], {}), '(mimic, 1000)\n', (1926, 1939), True, 'import shared.FixedIterationTrainer as FixedIterationTrainer\n')] |
import json
import os
import random
import re
import subprocess
import sys
from types import ModuleType
DEBUG_RUN_JS = os.getenv("DEBUG_RUN_JS", False) in [
"TRUE",
"true",
"True",
"T",
"t",
"1",
]
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
CWD = os.getcwd()
class NodeFunction:
def __init__(self, function_name, module_name):
self.module_name = module_name
self.function_name = function_name
def __call__(self, *args, **kwargs):
if DEBUG_RUN_JS:
print(
'[js] calling function "'
+ self.function_name
+ '" from module "'
+ self.module_name
+ '"'
)
if kwargs:
raise "[js] run-js does not support keyword arguments"
boundary = "\n--results-below-" + str(random.randint(1e2, 1e10)) + "--\n"
data = {
"module_name": self.module_name,
"function_name": self.function_name,
"params": args,
"boundary": boundary,
}
dumped = json.dumps(data)
file_path = os.path.join(DIR_PATH, "scripts/run.js")
process = subprocess.Popen(
["node", file_path],
shell=False,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
text=True,
encoding="utf-8",
cwd=CWD,
)
output, err = process.communicate(dumped, timeout=60)
if process.returncode != 0:
print(
"[run-js] the JavaScript command returned a non-zero exit code, so we are printing the error log, so you can figure out what happened"
)
print(err)
msg = next(
ln for ln in err.split("\n") if re.match("^([A-Z][a-z]+)?Error:", ln)
)
if msg:
raise Exception(msg)
raise Exception("[js.py] Command Failed")
splat = output.split(boundary)
if len(splat) != 2:
log = splat[0]
print(log)
return None
log, results = splat
print(log)
return json.loads(results)
def __str__(self):
return self.module_name + "." + self.function_name
class NodeModule:
def __init__(self, module_name):
self.module_name = module_name
file_path = os.path.join(DIR_PATH, "scripts/exists.js")
# check if need to install module
process = subprocess.Popen(
["node", file_path],
shell=False,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
text=True,
encoding="utf-8",
cwd=CWD,
)
process.communicate(module_name, timeout=60)
if process.returncode == 1:
# module_name might include a subpath, so remove that
if "/" in module_name:
if module_name.startswith("@"):
module_name = "/".join(module_name.split("/")[0:2])
else:
module_name = module_name.split("/")[0]
print("It doesn't appear that " + module_name + " is installed.")
print("We will now install it via https://www.npmjs.com/")
res = input("Press Y (yes) to continue or N (no) to cancel.\n")
if res.upper() not in ["Y", "YES"]:
raise Exception(
'module by the name "' + module_name + '" is not installed.'
)
# validate module_name
if not re.match(r"^@?[A-Za-z_\-\.\/]+$", module_name):
raise Exception("invalid module name")
# create package.json if none exists
package_json_file_path = os.path.join(CWD, "package.json")
if not os.path.isfile(package_json_file_path):
with open(package_json_file_path, mode="w", encoding="utf-8") as f:
f.write(json.dumps({"name": "run-js", "private": True}))
print("installing " + module_name)
subprocess.call(["npm", "install", module_name], shell=False, cwd=CWD)
def __getattr__(self, function_name):
if DEBUG_RUN_JS:
print(
'[js] loading function "'
+ function_name
+ '" from module "'
+ self.module_name
+ '"'
)
return NodeFunction(function_name=function_name, module_name=self.module_name)
def __getitem__(self, function_name):
if DEBUG_RUN_JS:
print(
'[js] loading function "'
+ function_name
+ '" from module "'
+ self.module_name
+ '"'
)
return NodeFunction(function_name=function_name, module_name=self.module_name)
def __call__(self, *args):
return NodeFunction(module_name=self.module_name, function_name="default")(
*args
)
class ModuleWrapper(ModuleType):
__path__ = []
def __init__(self, module):
# hack learned from https://github.com/amoffat/sh
super().__init__(
name=getattr(module, "__name__", None),
doc=getattr(module, "__doc__", None),
)
def __getitem__(self, key):
if DEBUG_RUN_JS:
print('[js] ModuleWrapper getting item "' + key + '"')
return NodeModule(module_name=key)
def __getattr__(self, attr):
if DEBUG_RUN_JS:
print('[js] ModuleWrapper getting attribute"' + attr + '"')
return NodeModule(module_name=attr)
if __name__ != "__main__":
if DEBUG_RUN_JS:
print("[js] importing " + __name__)
sys.modules[__name__] = ModuleWrapper(sys.modules[__name__])
| [
"json.loads",
"os.getenv",
"subprocess.Popen",
"json.dumps",
"os.path.join",
"re.match",
"os.getcwd",
"os.path.realpath",
"os.path.isfile",
"subprocess.call",
"random.randint"
] | [((286, 297), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (295, 297), False, 'import os\n'), ((120, 152), 'os.getenv', 'os.getenv', (['"""DEBUG_RUN_JS"""', '(False)'], {}), "('DEBUG_RUN_JS', False)\n", (129, 152), False, 'import os\n'), ((251, 277), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (267, 277), False, 'import os\n'), ((1098, 1114), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1108, 1114), False, 'import json\n'), ((1136, 1176), 'os.path.join', 'os.path.join', (['DIR_PATH', '"""scripts/run.js"""'], {}), "(DIR_PATH, 'scripts/run.js')\n", (1148, 1176), False, 'import os\n'), ((1196, 1364), 'subprocess.Popen', 'subprocess.Popen', (["['node', file_path]"], {'shell': '(False)', 'stderr': 'subprocess.PIPE', 'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'text': '(True)', 'encoding': '"""utf-8"""', 'cwd': 'CWD'}), "(['node', file_path], shell=False, stderr=subprocess.PIPE,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True, encoding=\n 'utf-8', cwd=CWD)\n", (1212, 1364), False, 'import subprocess\n'), ((2211, 2230), 'json.loads', 'json.loads', (['results'], {}), '(results)\n', (2221, 2230), False, 'import json\n'), ((2431, 2474), 'os.path.join', 'os.path.join', (['DIR_PATH', '"""scripts/exists.js"""'], {}), "(DIR_PATH, 'scripts/exists.js')\n", (2443, 2474), False, 'import os\n'), ((2536, 2704), 'subprocess.Popen', 'subprocess.Popen', (["['node', file_path]"], {'shell': '(False)', 'stderr': 'subprocess.PIPE', 'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'text': '(True)', 'encoding': '"""utf-8"""', 'cwd': 'CWD'}), "(['node', file_path], shell=False, stderr=subprocess.PIPE,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True, encoding=\n 'utf-8', cwd=CWD)\n", (2552, 2704), False, 'import subprocess\n'), ((3847, 3880), 'os.path.join', 'os.path.join', (['CWD', '"""package.json"""'], {}), "(CWD, 'package.json')\n", (3859, 3880), False, 'import os\n'), ((4161, 4231), 'subprocess.call', 
'subprocess.call', (["['npm', 'install', module_name]"], {'shell': '(False)', 'cwd': 'CWD'}), "(['npm', 'install', module_name], shell=False, cwd=CWD)\n", (4176, 4231), False, 'import subprocess\n'), ((3657, 3705), 're.match', 're.match', (['"""^@?[A-Za-z_\\\\-\\\\.\\\\/]+$"""', 'module_name'], {}), "('^@?[A-Za-z_\\\\-\\\\.\\\\/]+$', module_name)\n", (3665, 3705), False, 'import re\n'), ((3900, 3938), 'os.path.isfile', 'os.path.isfile', (['package_json_file_path'], {}), '(package_json_file_path)\n', (3914, 3938), False, 'import os\n'), ((860, 896), 'random.randint', 'random.randint', (['(100.0)', '(10000000000.0)'], {}), '(100.0, 10000000000.0)\n', (874, 896), False, 'import random\n'), ((1841, 1878), 're.match', 're.match', (['"""^([A-Z][a-z]+)?Error:"""', 'ln'], {}), "('^([A-Z][a-z]+)?Error:', ln)\n", (1849, 1878), False, 'import re\n'), ((4052, 4099), 'json.dumps', 'json.dumps', (["{'name': 'run-js', 'private': True}"], {}), "({'name': 'run-js', 'private': True})\n", (4062, 4099), False, 'import json\n')] |
import os
import max_quant as MQ
# Process the MaxQuant results of the stress response and stress response Hog1
# inhibition experiments
if __name__ == '__main__':
root = '../'
table_dir = os.path.join(root, 'tables')
fasta_path = os.path.join(root, 'yeast_cont_20140324.fasta')
mq_txt_folder = 'MQ_txt_SR_Romanov2017'
evidence_path = os.path.join(
root, 'MaxQuant', mq_txt_folder, 'evidence.txt'
)
experiments_toreverse = [
'01_nacl_5min', '02_nacl_5min', '03_nacl_5min', '04_nacl_5min',
'05_nacl_5min', '06_nacl_5min', '01_hog1as_5min', '02_hog1as_5min'
]
setups = ['SR', 'SR-hog1as']
from collections import defaultdict as ddict
exp_to_setup = ddict(lambda: 'exclude')
for exp in ['01_hog1as_5min', '02_hog1as_5min']:
exp_to_setup[exp] = 'SR-hog1as'
for exp in ['01_nacl_5min', '02_nacl_5min', '03_nacl_5min',
'04_nacl_5min', '05_nacl_5min', '06_nacl_5min']:
exp_to_setup[exp] = 'SR'
# Import Evidence
evidence, normalization_data = MQ.process_evidence(evidence_path, fasta_path)
evidence['Setup'] = [exp_to_setup[exp] for exp in evidence['Experiment']]
evidence = evidence[(evidence['Setup'] != 'exclude')]
MQ.reverse_ratios(evidence, experiments_toreverse)
# Generate output files
for setup in setups:
ev_setup = evidence[(evidence['Setup'] == setup)]
evidence_outpath = os.path.join(table_dir, 'evidence_' + setup + '.tsv')
phosphosite_outpath = os.path.join(table_dir, 'phospho_' + setup + '.tsv')
protein_outpath = os.path.join(table_dir, 'protein_' + setup + '.tsv')
MQ.write_evidence_table(ev_setup, evidence_outpath)
MQ.write_phosphosite_table(ev_setup, phosphosite_outpath, prob_cutoff=0.7)
MQ.write_protein_table(ev_setup, protein_outpath)
| [
"max_quant.reverse_ratios",
"max_quant.write_phosphosite_table",
"max_quant.process_evidence",
"os.path.join",
"max_quant.write_protein_table",
"collections.defaultdict",
"max_quant.write_evidence_table"
] | [((201, 229), 'os.path.join', 'os.path.join', (['root', '"""tables"""'], {}), "(root, 'tables')\n", (213, 229), False, 'import os\n'), ((247, 294), 'os.path.join', 'os.path.join', (['root', '"""yeast_cont_20140324.fasta"""'], {}), "(root, 'yeast_cont_20140324.fasta')\n", (259, 294), False, 'import os\n'), ((359, 420), 'os.path.join', 'os.path.join', (['root', '"""MaxQuant"""', 'mq_txt_folder', '"""evidence.txt"""'], {}), "(root, 'MaxQuant', mq_txt_folder, 'evidence.txt')\n", (371, 420), False, 'import os\n'), ((724, 749), 'collections.defaultdict', 'ddict', (["(lambda : 'exclude')"], {}), "(lambda : 'exclude')\n", (729, 749), True, 'from collections import defaultdict as ddict\n'), ((1062, 1108), 'max_quant.process_evidence', 'MQ.process_evidence', (['evidence_path', 'fasta_path'], {}), '(evidence_path, fasta_path)\n', (1081, 1108), True, 'import max_quant as MQ\n'), ((1249, 1299), 'max_quant.reverse_ratios', 'MQ.reverse_ratios', (['evidence', 'experiments_toreverse'], {}), '(evidence, experiments_toreverse)\n', (1266, 1299), True, 'import max_quant as MQ\n'), ((1440, 1493), 'os.path.join', 'os.path.join', (['table_dir', "('evidence_' + setup + '.tsv')"], {}), "(table_dir, 'evidence_' + setup + '.tsv')\n", (1452, 1493), False, 'import os\n'), ((1524, 1576), 'os.path.join', 'os.path.join', (['table_dir', "('phospho_' + setup + '.tsv')"], {}), "(table_dir, 'phospho_' + setup + '.tsv')\n", (1536, 1576), False, 'import os\n'), ((1603, 1655), 'os.path.join', 'os.path.join', (['table_dir', "('protein_' + setup + '.tsv')"], {}), "(table_dir, 'protein_' + setup + '.tsv')\n", (1615, 1655), False, 'import os\n'), ((1665, 1716), 'max_quant.write_evidence_table', 'MQ.write_evidence_table', (['ev_setup', 'evidence_outpath'], {}), '(ev_setup, evidence_outpath)\n', (1688, 1716), True, 'import max_quant as MQ\n'), ((1725, 1799), 'max_quant.write_phosphosite_table', 'MQ.write_phosphosite_table', (['ev_setup', 'phosphosite_outpath'], {'prob_cutoff': '(0.7)'}), '(ev_setup, 
phosphosite_outpath, prob_cutoff=0.7)\n', (1751, 1799), True, 'import max_quant as MQ\n'), ((1808, 1857), 'max_quant.write_protein_table', 'MQ.write_protein_table', (['ev_setup', 'protein_outpath'], {}), '(ev_setup, protein_outpath)\n', (1830, 1857), True, 'import max_quant as MQ\n')] |
from pyramid.config import Configurator
from pyramid.events import NewRequest, subscriber
from .model.meta import setup_app, Session
from .util import dumps
from pyramid.renderers import JSON
@subscriber(NewRequest)
def cleanup_sess(event):
"""Listen for new requests and assign a cleanup handler to each."""
def remove(request):
Session.remove()
event.request.add_finished_callback(remove)
def main(global_config, **settings):
config = Configurator(settings=settings)
setup_app(global_config, **settings)
config.add_route("start_session", "/login")
config.add_route("withdraw", "/withdraw")
config.add_route("deposit", "/deposit")
config.add_route("balance", "/balance")
config.add_renderer('json', JSON(serializer=dumps))
config.scan()
return config.make_wsgi_app() | [
"pyramid.config.Configurator",
"pyramid.events.subscriber",
"pyramid.renderers.JSON"
] | [((194, 216), 'pyramid.events.subscriber', 'subscriber', (['NewRequest'], {}), '(NewRequest)\n', (204, 216), False, 'from pyramid.events import NewRequest, subscriber\n'), ((464, 495), 'pyramid.config.Configurator', 'Configurator', ([], {'settings': 'settings'}), '(settings=settings)\n', (476, 495), False, 'from pyramid.config import Configurator\n'), ((754, 776), 'pyramid.renderers.JSON', 'JSON', ([], {'serializer': 'dumps'}), '(serializer=dumps)\n', (758, 776), False, 'from pyramid.renderers import JSON\n')] |
#
# Copyright (c) 2015-2020 <NAME> <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_security_views.skin.login module
This modules defines login and modal login views.
These views are automatically associated with Pyramid forbidden views.
"""
from pyramid.csrf import new_csrf_token
from pyramid.decorator import reify
from pyramid.events import subscriber
from pyramid.httpexceptions import HTTPForbidden, HTTPFound
from pyramid.response import Response
from pyramid.security import forget, remember
from pyramid.view import forbidden_view_config, view_config
from zope.interface import Interface, Invalid, implementer
from zope.schema.fieldproperty import FieldProperty
from pyams_form.ajax import ajax_form_config
from pyams_form.button import Buttons, handler
from pyams_form.field import Fields
from pyams_form.form import AddForm
from pyams_form.interfaces.form import IAJAXFormRenderer, IDataExtractedEvent
from pyams_i18n.interfaces import II18n
from pyams_layer.interfaces import IPyAMSLayer, IResources
from pyams_security.credential import Credentials
from pyams_security.interfaces import ISecurityManager, IViewContextPermissionChecker, \
LOGIN_REFERER_KEY
from pyams_security.interfaces.base import PUBLIC_PERMISSION
from pyams_security_views.interfaces.login import ILoginConfiguration, ILoginFormButtons, \
ILoginFormFields, ILoginView, IModalLoginFormButtons
from pyams_skin.interfaces.view import IModalFullPage, IModalPage
from pyams_skin.interfaces.viewlet import IFooterViewletManager, IHeaderViewletManager
from pyams_template.template import template_config
from pyams_utils.adapter import ContextRequestViewAdapter, adapter_config
from pyams_utils.interfaces.data import IObjectData
from pyams_utils.registry import query_utility
from pyams_utils.text import text_to_html
from pyams_viewlet.viewlet import Viewlet, viewlet_config
__docformat__ = 'restructuredtext'
from pyams_security_views import _ # pylint: disable=ungrouped-imports
@forbidden_view_config(request_type=IPyAMSLayer)
def ForbiddenView(request): # pylint: disable=invalid-name
"""Default forbidden view"""
request.session[LOGIN_REFERER_KEY] = request.url
return HTTPFound('login.html')
@forbidden_view_config(request_type=IPyAMSLayer, renderer='json', xhr=True)
def ForbiddenAJAXView(request): # pylint: disable=invalid-name
"""AJAX forbidden view"""
request.response.status = HTTPForbidden.code
return {
'status': 'modal',
'location': 'login-dialog.html'
}
@ajax_form_config(name='login.html', layer=IPyAMSLayer) # pylint: disable=abstract-method
@implementer(IModalFullPage, ILoginView, IObjectData)
class LoginForm(AddForm):
"""Login form"""
title = _("You must authenticate")
legend = _("Please enter valid credentials")
modal_class = FieldProperty(IModalFullPage['modal_class'])
fields = Fields(ILoginFormFields)
buttons = Buttons(ILoginFormButtons)
edit_permission = None
object_data = {
'ams-warn-on-change': False
}
def update(self):
super().update()
new_csrf_token(self.request)
@handler(buttons['login'])
def login_handler(self, action): # pylint: disable=unused-argument
"""Login button handler"""
data, errors = self.extract_data()
if errors:
self.status = self.form_errors_message
return None
principal_id = data.get('principal_id')
if principal_id is not None:
request = self.request
headers = remember(request, principal_id)
response = request.response
response.headerlist.extend(headers)
if not self.request.is_xhr:
response.status_code = 302
session = request.session
if LOGIN_REFERER_KEY in session:
response.location = session[LOGIN_REFERER_KEY]
del session[LOGIN_REFERER_KEY]
else:
response.location = '/'
return response
return None
@ajax_form_config(name='login-dialog.html', layer=IPyAMSLayer) # pylint: disable=abstract-method
@implementer(IModalPage, ILoginView)
class ModalLoginForm(LoginForm):
"""Modal login form"""
modal_class = 'modal-lg'
buttons = Buttons(IModalLoginFormButtons)
@subscriber(IDataExtractedEvent, form_selector=ILoginView)
def handle_login_form_data(event):
"""Check credentials after data extraction"""
data = event.data
if 'principal_id' in data:
del data['principal_id']
sm = query_utility(ISecurityManager) # pylint: disable=invalid-name
if sm is None:
event.form.widgets.errors += (Invalid(_("Missing security manager utility. "
"Please contact your system administrator!")), )
else:
credentials = Credentials('form', id=data['login'], **data)
principal_id = sm.authenticate(credentials, event.form.request)
if principal_id is None:
event.form.widgets.errors += (Invalid(_("Invalid credentials!")),)
else:
data['principal_id'] = principal_id
@adapter_config(required=(Interface, IPyAMSLayer, ILoginView),
provides=IAJAXFormRenderer)
class LoginFormAJAXRenderer(ContextRequestViewAdapter):
"""Login form result renderer"""
def render(self, changes): # pylint: disable=unused-argument
"""AJAX form renderer"""
status = {'status': 'redirect'}
session = self.request.session
if LOGIN_REFERER_KEY in session:
status['location'] = session[LOGIN_REFERER_KEY] or '/'
del session[LOGIN_REFERER_KEY]
else:
status['location'] = '/'
return status
try:
from pyams_zmi.interfaces.configuration import IZMIConfiguration, MYAMS_BUNDLES
@adapter_config(name='login',
required=(Interface, IPyAMSLayer, ILoginView),
provides=IResources)
class LoginViewResourcesAdapter(ContextRequestViewAdapter):
"""Login view resources adapter"""
weight = 10
@property
def resources(self):
"""Resources getter"""
request = self.request
configuration = IZMIConfiguration(request.root, None)
if configuration is not None:
# yield MyAMS bundle
bundle, _label = MYAMS_BUNDLES.get(configuration.myams_bundle)
yield bundle
except ImportError:
pass
@viewlet_config(name='login.logo', layer=IPyAMSLayer, view=ILoginView,
manager=IHeaderViewletManager, weight=1)
@template_config(template='templates/login-logo.pt')
class LoginLogoViewlet(Viewlet):
"""Login logo viewlet"""
@property
def logo(self):
"""Logo getter"""
configuration = ILoginConfiguration(self.request.root, None)
if configuration:
return II18n(configuration).query_attribute('logo', request=self.request)
return None
@template_config(template='templates/login-viewlet.pt')
class LoginViewlet(Viewlet):
"""Base login viewlet"""
text_value = None
attribute_name = 'header'
renderer_getter = lambda x, y: y
@reify
def configuration(self):
"""Configuration getter"""
return ILoginConfiguration(self.request.root, None)
def render(self):
configuration = self.configuration
if configuration:
# pylint: disable=assignment-from-no-return
value = II18n(configuration).query_attribute(self.attribute_name,
request=self.request)
if value:
renderer = self.renderer_getter(configuration) # pylint: disable=no-value-for-parameter
if renderer == 'text':
self.text_value = value
return super().render()
return text_to_html(value, renderer=renderer)
return ''
@viewlet_config(name='login.header', layer=IPyAMSLayer, view=ILoginView,
manager=IHeaderViewletManager, weight=100)
class LoginHeaderViewlet(LoginViewlet):
"""Login header viewlet"""
attribute_name = 'header'
renderer_getter = lambda x, config: config.header_renderer
@viewlet_config(name='login.footer', layer=IPyAMSLayer, view=ILoginView,
manager=IFooterViewletManager, weight=100)
class LoginFooterViewlet(LoginViewlet):
"""Login footer viewlet"""
attribute_name = 'footer'
renderer_getter = lambda x, config: config.footer_renderer
@view_config(name='logout', request_type=IPyAMSLayer)
def logout(request):
"""Logout view"""
headers = forget(request)
response = Response()
response.headerlist.extend(headers)
response.status_code = 302
response.location = request.referer or '/'
return response
| [
"pyramid.view.forbidden_view_config",
"pyams_utils.adapter.adapter_config",
"pyams_zmi.interfaces.configuration.MYAMS_BUNDLES.get",
"pyams_utils.registry.query_utility",
"pyams_viewlet.viewlet.viewlet_config",
"pyramid.csrf.new_csrf_token",
"zope.schema.fieldproperty.FieldProperty",
"pyams_form.button... | [((2383, 2430), 'pyramid.view.forbidden_view_config', 'forbidden_view_config', ([], {'request_type': 'IPyAMSLayer'}), '(request_type=IPyAMSLayer)\n', (2404, 2430), False, 'from pyramid.view import forbidden_view_config, view_config\n'), ((2615, 2689), 'pyramid.view.forbidden_view_config', 'forbidden_view_config', ([], {'request_type': 'IPyAMSLayer', 'renderer': '"""json"""', 'xhr': '(True)'}), "(request_type=IPyAMSLayer, renderer='json', xhr=True)\n", (2636, 2689), False, 'from pyramid.view import forbidden_view_config, view_config\n'), ((2922, 2976), 'pyams_form.ajax.ajax_form_config', 'ajax_form_config', ([], {'name': '"""login.html"""', 'layer': 'IPyAMSLayer'}), "(name='login.html', layer=IPyAMSLayer)\n", (2938, 2976), False, 'from pyams_form.ajax import ajax_form_config\n'), ((3013, 3065), 'zope.interface.implementer', 'implementer', (['IModalFullPage', 'ILoginView', 'IObjectData'], {}), '(IModalFullPage, ILoginView, IObjectData)\n', (3024, 3065), False, 'from zope.interface import Interface, Invalid, implementer\n'), ((4469, 4530), 'pyams_form.ajax.ajax_form_config', 'ajax_form_config', ([], {'name': '"""login-dialog.html"""', 'layer': 'IPyAMSLayer'}), "(name='login-dialog.html', layer=IPyAMSLayer)\n", (4485, 4530), False, 'from pyams_form.ajax import ajax_form_config\n'), ((4567, 4602), 'zope.interface.implementer', 'implementer', (['IModalPage', 'ILoginView'], {}), '(IModalPage, ILoginView)\n', (4578, 4602), False, 'from zope.interface import Interface, Invalid, implementer\n'), ((4742, 4799), 'pyramid.events.subscriber', 'subscriber', (['IDataExtractedEvent'], {'form_selector': 'ILoginView'}), '(IDataExtractedEvent, form_selector=ILoginView)\n', (4752, 4799), False, 'from pyramid.events import subscriber\n'), ((5572, 5666), 'pyams_utils.adapter.adapter_config', 'adapter_config', ([], {'required': '(Interface, IPyAMSLayer, ILoginView)', 'provides': 'IAJAXFormRenderer'}), '(required=(Interface, IPyAMSLayer, ILoginView), provides=\n 
IAJAXFormRenderer)\n', (5586, 5666), False, 'from pyams_utils.adapter import ContextRequestViewAdapter, adapter_config\n'), ((6940, 7054), 'pyams_viewlet.viewlet.viewlet_config', 'viewlet_config', ([], {'name': '"""login.logo"""', 'layer': 'IPyAMSLayer', 'view': 'ILoginView', 'manager': 'IHeaderViewletManager', 'weight': '(1)'}), "(name='login.logo', layer=IPyAMSLayer, view=ILoginView,\n manager=IHeaderViewletManager, weight=1)\n", (6954, 7054), False, 'from pyams_viewlet.viewlet import Viewlet, viewlet_config\n'), ((7068, 7119), 'pyams_template.template.template_config', 'template_config', ([], {'template': '"""templates/login-logo.pt"""'}), "(template='templates/login-logo.pt')\n", (7083, 7119), False, 'from pyams_template.template import template_config\n'), ((7447, 7501), 'pyams_template.template.template_config', 'template_config', ([], {'template': '"""templates/login-viewlet.pt"""'}), "(template='templates/login-viewlet.pt')\n", (7462, 7501), False, 'from pyams_template.template import template_config\n'), ((8428, 8546), 'pyams_viewlet.viewlet.viewlet_config', 'viewlet_config', ([], {'name': '"""login.header"""', 'layer': 'IPyAMSLayer', 'view': 'ILoginView', 'manager': 'IHeaderViewletManager', 'weight': '(100)'}), "(name='login.header', layer=IPyAMSLayer, view=ILoginView,\n manager=IHeaderViewletManager, weight=100)\n", (8442, 8546), False, 'from pyams_viewlet.viewlet import Viewlet, viewlet_config\n'), ((8727, 8845), 'pyams_viewlet.viewlet.viewlet_config', 'viewlet_config', ([], {'name': '"""login.footer"""', 'layer': 'IPyAMSLayer', 'view': 'ILoginView', 'manager': 'IFooterViewletManager', 'weight': '(100)'}), "(name='login.footer', layer=IPyAMSLayer, view=ILoginView,\n manager=IFooterViewletManager, weight=100)\n", (8741, 8845), False, 'from pyams_viewlet.viewlet import Viewlet, viewlet_config\n'), ((9026, 9078), 'pyramid.view.view_config', 'view_config', ([], {'name': '"""logout"""', 'request_type': 'IPyAMSLayer'}), "(name='logout', 
request_type=IPyAMSLayer)\n", (9037, 9078), False, 'from pyramid.view import forbidden_view_config, view_config\n'), ((2588, 2611), 'pyramid.httpexceptions.HTTPFound', 'HTTPFound', (['"""login.html"""'], {}), "('login.html')\n", (2597, 2611), False, 'from pyramid.httpexceptions import HTTPForbidden, HTTPFound\n'), ((3126, 3152), 'pyams_security_views._', '_', (['"""You must authenticate"""'], {}), "('You must authenticate')\n", (3127, 3152), False, 'from pyams_security_views import _\n'), ((3166, 3201), 'pyams_security_views._', '_', (['"""Please enter valid credentials"""'], {}), "('Please enter valid credentials')\n", (3167, 3201), False, 'from pyams_security_views import _\n'), ((3221, 3265), 'zope.schema.fieldproperty.FieldProperty', 'FieldProperty', (["IModalFullPage['modal_class']"], {}), "(IModalFullPage['modal_class'])\n", (3234, 3265), False, 'from zope.schema.fieldproperty import FieldProperty\n'), ((3280, 3304), 'pyams_form.field.Fields', 'Fields', (['ILoginFormFields'], {}), '(ILoginFormFields)\n', (3286, 3304), False, 'from pyams_form.field import Fields\n'), ((3319, 3345), 'pyams_form.button.Buttons', 'Buttons', (['ILoginFormButtons'], {}), '(ILoginFormButtons)\n', (3326, 3345), False, 'from pyams_form.button import Buttons, handler\n'), ((3528, 3553), 'pyams_form.button.handler', 'handler', (["buttons['login']"], {}), "(buttons['login'])\n", (3535, 3553), False, 'from pyams_form.button import Buttons, handler\n'), ((4707, 4738), 'pyams_form.button.Buttons', 'Buttons', (['IModalLoginFormButtons'], {}), '(IModalLoginFormButtons)\n', (4714, 4738), False, 'from pyams_form.button import Buttons, handler\n'), ((4980, 5011), 'pyams_utils.registry.query_utility', 'query_utility', (['ISecurityManager'], {}), '(ISecurityManager)\n', (4993, 5011), False, 'from pyams_utils.registry import query_utility\n'), ((6271, 6371), 'pyams_utils.adapter.adapter_config', 'adapter_config', ([], {'name': '"""login"""', 'required': '(Interface, IPyAMSLayer, ILoginView)', 
'provides': 'IResources'}), "(name='login', required=(Interface, IPyAMSLayer, ILoginView),\n provides=IResources)\n", (6285, 6371), False, 'from pyams_utils.adapter import ContextRequestViewAdapter, adapter_config\n'), ((9136, 9151), 'pyramid.security.forget', 'forget', (['request'], {}), '(request)\n', (9142, 9151), False, 'from pyramid.security import forget, remember\n'), ((9167, 9177), 'pyramid.response.Response', 'Response', ([], {}), '()\n', (9175, 9177), False, 'from pyramid.response import Response\n'), ((3493, 3521), 'pyramid.csrf.new_csrf_token', 'new_csrf_token', (['self.request'], {}), '(self.request)\n', (3507, 3521), False, 'from pyramid.csrf import new_csrf_token\n'), ((5277, 5322), 'pyams_security.credential.Credentials', 'Credentials', (['"""form"""'], {'id': "data['login']"}), "('form', id=data['login'], **data)\n", (5288, 5322), False, 'from pyams_security.credential import Credentials\n'), ((7267, 7311), 'pyams_security_views.interfaces.login.ILoginConfiguration', 'ILoginConfiguration', (['self.request.root', 'None'], {}), '(self.request.root, None)\n', (7286, 7311), False, 'from pyams_security_views.interfaces.login import ILoginConfiguration, ILoginFormButtons, ILoginFormFields, ILoginView, IModalLoginFormButtons\n'), ((7741, 7785), 'pyams_security_views.interfaces.login.ILoginConfiguration', 'ILoginConfiguration', (['self.request.root', 'None'], {}), '(self.request.root, None)\n', (7760, 7785), False, 'from pyams_security_views.interfaces.login import ILoginConfiguration, ILoginFormButtons, ILoginFormFields, ILoginView, IModalLoginFormButtons\n'), ((3940, 3971), 'pyramid.security.remember', 'remember', (['request', 'principal_id'], {}), '(request, principal_id)\n', (3948, 3971), False, 'from pyramid.security import forget, remember\n'), ((6682, 6719), 'pyams_zmi.interfaces.configuration.IZMIConfiguration', 'IZMIConfiguration', (['request.root', 'None'], {}), '(request.root, None)\n', (6699, 6719), False, 'from 
pyams_zmi.interfaces.configuration import IZMIConfiguration, MYAMS_BUNDLES\n'), ((5109, 5194), 'pyams_security_views._', '_', (['"""Missing security manager utility. Please contact your system administrator!"""'], {}), "('Missing security manager utility. Please contact your system administrator!'\n )\n", (5110, 5194), False, 'from pyams_security_views import _\n'), ((6832, 6877), 'pyams_zmi.interfaces.configuration.MYAMS_BUNDLES.get', 'MYAMS_BUNDLES.get', (['configuration.myams_bundle'], {}), '(configuration.myams_bundle)\n', (6849, 6877), False, 'from pyams_zmi.interfaces.configuration import IZMIConfiguration, MYAMS_BUNDLES\n'), ((8368, 8406), 'pyams_utils.text.text_to_html', 'text_to_html', (['value'], {'renderer': 'renderer'}), '(value, renderer=renderer)\n', (8380, 8406), False, 'from pyams_utils.text import text_to_html\n'), ((5478, 5503), 'pyams_security_views._', '_', (['"""Invalid credentials!"""'], {}), "('Invalid credentials!')\n", (5479, 5503), False, 'from pyams_security_views import _\n'), ((7357, 7377), 'pyams_i18n.interfaces.II18n', 'II18n', (['configuration'], {}), '(configuration)\n', (7362, 7377), False, 'from pyams_i18n.interfaces import II18n\n'), ((7954, 7974), 'pyams_i18n.interfaces.II18n', 'II18n', (['configuration'], {}), '(configuration)\n', (7959, 7974), False, 'from pyams_i18n.interfaces import II18n\n')] |
import numpy as np
from scipy.spatial.distance import pdist, squareform
import scipy.cluster.hierarchy as hy
import matplotlib.pyplot as plt
# Creating a cluster of clusters function
def clusters(number=20, cnumber=5, csize=10):
    """Generate ``cnumber`` Gaussian point clouds stacked into one array.

    Parameters
    ----------
    number : int
        Baseline point count; also scales the randomly drawn per-cluster
        sizes, which are clipped to the range [5, number / 2].
    cnumber : int
        Number of clusters to generate (the first cloud is centred at the
        origin, the rest at random offsets).
    csize : int
        Spread of the cluster centres and of the first cloud.

    Returns
    -------
    numpy.ndarray
        An (n, 3) array of points from all clusters, where n is ``number``
        plus the sizes of the additional clusters.
    """
    # Cluster positioning uses Gaussian randomness; sizes are clipped below.
    rnum = np.random.rand(cnumber, 2)
    rn = (rnum[:, 0] * number).astype(int)
    rn[rn < 5] = 5
    rn[rn > number / 2.] = round(number / 2., 0)
    # First cloud: `number` points around the origin.
    cls = np.random.randn(number, 3) * csize
    # Random multipliers for the central point of each additional cluster.
    rxyz = np.random.randn(cnumber - 1, 3)
    # BUG FIX: the original used Python 2's ``xrange``, which raises
    # NameError on Python 3; ``range`` is the correct replacement.
    for i in range(cnumber - 1):
        tmp = np.random.randn(rn[i + 1], 3)
        x = tmp[:, 0] + (rxyz[i, 0] * csize)
        y = tmp[:, 1] + (rxyz[i, 1] * csize)
        z = tmp[:, 2] + (rxyz[i, 2] * csize)
        cls = np.vstack([cls, np.column_stack([x, y, z])])
    return cls
# Generate a cluster of clusters and the pairwise distance matrix of the
# first two coordinates.
cls = clusters()
D = pdist(cls[:, 0:2])
D = squareform(D)
# Compute and plot first dendrogram (complete linkage) on the left.
fig = plt.figure(figsize=(8, 8))
ax1 = fig.add_axes([0.09, 0.1, 0.2, 0.6])
Y1 = hy.linkage(D, method='complete')
cutoff = 0.3 * np.max(Y1[:, 2])
Z1 = hy.dendrogram(Y1, orientation='right', color_threshold=cutoff)
ax1.xaxis.set_visible(False)
ax1.yaxis.set_visible(False)
# Compute and plot second dendrogram (average linkage) on top.
ax2 = fig.add_axes([0.3, 0.71, 0.6, 0.2])
Y2 = hy.linkage(D, method='average')
cutoff = 0.3 * np.max(Y2[:, 2])
Z2 = hy.dendrogram(Y2, color_threshold=cutoff)
ax2.xaxis.set_visible(False)
ax2.yaxis.set_visible(False)
# Reorder the distance matrix by the leaf order of each dendrogram and
# display it as a heat map between them.
ax3 = fig.add_axes([0.3, 0.1, 0.6, 0.6])
idx1 = Z1['leaves']
idx2 = Z2['leaves']
D = D[idx1, :]
D = D[:, idx2]
ax3.matshow(D, aspect='auto', origin='lower', cmap=plt.cm.YlGnBu)
ax3.xaxis.set_visible(False)
ax3.yaxis.set_visible(False)
# BUG FIX: matplotlib's keyword is ``bbox_inches``; the original passed an
# unknown ``bbox`` keyword, so the tight bounding box was never applied.
fig.savefig('scipy_352_ex1.pdf', bbox_inches='tight')
| [
"scipy.spatial.distance.squareform",
"scipy.cluster.hierarchy.dendrogram",
"numpy.random.rand",
"numpy.where",
"scipy.spatial.distance.pdist",
"numpy.column_stack",
"numpy.max",
"matplotlib.pyplot.figure",
"scipy.cluster.hierarchy.linkage",
"numpy.vstack",
"numpy.random.randn"
] | [((1070, 1088), 'scipy.spatial.distance.pdist', 'pdist', (['cls[:, 0:2]'], {}), '(cls[:, 0:2])\n', (1075, 1088), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((1093, 1106), 'scipy.spatial.distance.squareform', 'squareform', (['D'], {}), '(D)\n', (1103, 1106), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((1151, 1177), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (1161, 1177), True, 'import matplotlib.pyplot as plt\n'), ((1225, 1257), 'scipy.cluster.hierarchy.linkage', 'hy.linkage', (['D'], {'method': '"""complete"""'}), "(D, method='complete')\n", (1235, 1257), True, 'import scipy.cluster.hierarchy as hy\n'), ((1295, 1357), 'scipy.cluster.hierarchy.dendrogram', 'hy.dendrogram', (['Y1'], {'orientation': '"""right"""', 'color_threshold': 'cutoff'}), "(Y1, orientation='right', color_threshold=cutoff)\n", (1308, 1357), True, 'import scipy.cluster.hierarchy as hy\n'), ((1502, 1533), 'scipy.cluster.hierarchy.linkage', 'hy.linkage', (['D'], {'method': '"""average"""'}), "(D, method='average')\n", (1512, 1533), True, 'import scipy.cluster.hierarchy as hy\n'), ((1571, 1612), 'scipy.cluster.hierarchy.dendrogram', 'hy.dendrogram', (['Y2'], {'color_threshold': 'cutoff'}), '(Y2, color_threshold=cutoff)\n', (1584, 1612), True, 'import scipy.cluster.hierarchy as hy\n'), ((318, 344), 'numpy.random.rand', 'np.random.rand', (['cnumber', '(2)'], {}), '(cnumber, 2)\n', (332, 344), True, 'import numpy as np\n'), ((657, 688), 'numpy.random.randn', 'np.random.randn', (['(cnumber - 1)', '(3)'], {}), '(cnumber - 1, 3)\n', (672, 688), True, 'import numpy as np\n'), ((1273, 1289), 'numpy.max', 'np.max', (['Y1[:, 2]'], {}), '(Y1[:, 2])\n', (1279, 1289), True, 'import numpy as np\n'), ((1549, 1565), 'numpy.max', 'np.max', (['Y2[:, 2]'], {}), '(Y2[:, 2])\n', (1555, 1565), True, 'import numpy as np\n'), ((405, 421), 'numpy.where', 'np.where', (['(rn < 5)'], {}), '(rn < 5)\n', (413, 421), True, 'import 
numpy as np\n'), ((434, 461), 'numpy.where', 'np.where', (['(rn > number / 2.0)'], {}), '(rn > number / 2.0)\n', (442, 461), True, 'import numpy as np\n'), ((519, 537), 'numpy.where', 'np.where', (['(ra < 1.5)'], {}), '(ra < 1.5)\n', (527, 537), True, 'import numpy as np\n'), ((556, 582), 'numpy.random.randn', 'np.random.randn', (['number', '(3)'], {}), '(number, 3)\n', (571, 582), True, 'import numpy as np\n'), ((737, 766), 'numpy.random.randn', 'np.random.randn', (['rn[i + 1]', '(3)'], {}), '(rn[i + 1], 3)\n', (752, 766), True, 'import numpy as np\n'), ((916, 942), 'numpy.column_stack', 'np.column_stack', (['[x, y, z]'], {}), '([x, y, z])\n', (931, 942), True, 'import numpy as np\n'), ((957, 978), 'numpy.vstack', 'np.vstack', (['[cls, tmp]'], {}), '([cls, tmp])\n', (966, 978), True, 'import numpy as np\n')] |
import pyzipper
import terminal
from sys import argv
from os import remove
from pyzipper import is_zipfile
if __name__ == "__main__":
    # Usage: script <archive> <brute|dictionary>
    argv_count = len(argv)
    if argv_count < 3:
        terminal.how_to()
    else:
        my_file = argv[1]
        if is_zipfile(my_file):
            try:
                # First see whether the archive extracts without a password;
                # the extracted files are removed again right away.
                with pyzipper.AESZipFile(my_file, 'r') as archive:
                    archive.extractall()
                    for file in archive.namelist():
                        remove(file)
                print("The archive is not password protected!")
            # BUG FIX: the original bare ``except:`` also swallowed
            # SystemExit/KeyboardInterrupt; ``Exception`` keeps the intended
            # "extraction failed -> archive is protected" behaviour only.
            except Exception:
                method = argv[2].lower()
                if method == 'brute':
                    terminal.brute(my_file)
                elif method == 'dictionary':
                    terminal.dictionary(my_file)
                else:
                    terminal.how_to()
| [
"pyzipper.is_zipfile",
"terminal.dictionary",
"pyzipper.AESZipFile",
"terminal.brute",
"terminal.how_to",
"os.remove"
] | [((195, 212), 'terminal.how_to', 'terminal.how_to', ([], {}), '()\n', (210, 212), False, 'import terminal\n'), ((260, 279), 'pyzipper.is_zipfile', 'is_zipfile', (['my_file'], {}), '(my_file)\n', (270, 279), False, 'from pyzipper import is_zipfile\n'), ((867, 884), 'terminal.how_to', 'terminal.how_to', ([], {}), '()\n', (882, 884), False, 'import terminal\n'), ((363, 396), 'pyzipper.AESZipFile', 'pyzipper.AESZipFile', (['my_file', '"""r"""'], {}), "(my_file, 'r')\n", (382, 396), False, 'import pyzipper\n'), ((526, 538), 'os.remove', 'remove', (['file'], {}), '(file)\n', (532, 538), False, 'from os import remove\n'), ((723, 746), 'terminal.brute', 'terminal.brute', (['my_file'], {}), '(my_file)\n', (737, 746), False, 'import terminal\n'), ((812, 840), 'terminal.dictionary', 'terminal.dictionary', (['my_file'], {}), '(my_file)\n', (831, 840), False, 'import terminal\n')] |
import unittest
import torch
class TensorMultiplying(unittest.TestCase):
    """Checks per-channel scaling of a batched tensor via broadcasting.

    Both tests multiply a (batch, channel, H, W) tensor by a per-channel
    vector ``alfa`` broadcast as ``alfa[None, :, None, None]``.
    """

    def test_tensor_multiplying_last_layer(self):
        """A 3x3x3 pattern repeated over a batch of 128, scaled channel-wise."""
        plane = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
        out = torch.tensor([plane, plane, plane])
        out = out.unsqueeze(0).repeat(128, 1, 1, 1)
        alfa = torch.tensor([2.0, 3.0, 4.0])
        # Channel c of every batch element is the base plane times alfa[c].
        expected = torch.tensor(
            [[[scale * v for v in row] for row in plane]
             for scale in (2.0, 3.0, 4.0)])
        expected = expected.unsqueeze(0).repeat(128, 1, 1, 1)
        result = out * alfa[None, :, None, None]
        self.assertTrue(torch.equal(expected, result))

    def test_tensor_multiplying_prelast_layer(self):
        """Constant-per-batch tensor (all 1s, 2s, 3s) scaled channel-wise.

        BUG FIX: the original ``expected`` tensor had shape (3, 3, 3) and the
        wrong values, so comparing it against the (3, 3, 3, 3) ``result`` with
        ``torch.equal`` was always False and the test could never pass.
        ``expected[b, c]`` must be a 3x3 plane filled with ``(b + 1) * alfa[c]``.
        """
        # Batch b is a 3x3x3 block filled with the constant b + 1.
        out = torch.tensor(
            [[[[float(batch)] * 3] * 3] * 3 for batch in (1, 2, 3)])
        alfa = torch.tensor([2.0, 3.0, 4.0])
        expected = torch.tensor(
            [[[[batch * channel] * 3] * 3 for channel in (2.0, 3.0, 4.0)]
             for batch in (1.0, 2.0, 3.0)])
        result = out * alfa[None, :, None, None]
        self.assertTrue(torch.equal(expected, result))
# Allow running this test module directly with ``python <file>``.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"torch.tensor",
"torch.equal"
] | [((2295, 2310), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2308, 2310), False, 'import unittest\n'), ((139, 322), 'torch.tensor', 'torch.tensor', (['[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], [[1.0, 2.0, 3.0], [\n 4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [\n 7.0, 8.0, 9.0]]]'], {}), '([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], [[1.0, \n 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], [[1.0, 2.0, 3.0], [4.0, \n 5.0, 6.0], [7.0, 8.0, 9.0]]])\n', (151, 322), False, 'import torch\n'), ((405, 434), 'torch.tensor', 'torch.tensor', (['[2.0, 3.0, 4.0]'], {}), '([2.0, 3.0, 4.0])\n', (417, 434), False, 'import torch\n'), ((455, 656), 'torch.tensor', 'torch.tensor', (['[[[2.0, 4.0, 6.0], [8.0, 10.0, 12.0], [14.0, 16.0, 18.0]], [[3.0, 6.0, 9.0],\n [12.0, 15.0, 18.0], [21.0, 24.0, 27.0]], [[4.0, 8.0, 12.0], [16.0, 20.0,\n 24.0], [28.0, 32.0, 36.0]]]'], {}), '([[[2.0, 4.0, 6.0], [8.0, 10.0, 12.0], [14.0, 16.0, 18.0]], [[\n 3.0, 6.0, 9.0], [12.0, 15.0, 18.0], [21.0, 24.0, 27.0]], [[4.0, 8.0, \n 12.0], [16.0, 20.0, 24.0], [28.0, 32.0, 36.0]]])\n', (467, 656), False, 'import torch\n'), ((1129, 1653), 'torch.tensor', 'torch.tensor', (['[[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0], [\n 1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [\n 1.0, 1.0, 1.0]]], [[[2.0, 2.0, 2.0], [2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],\n [[2.0, 2.0, 2.0], [2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], [[2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]], [[[3.0, 3.0, 3.0], [3.0, 3.0, 3.0],\n [3.0, 3.0, 3.0]], [[3.0, 3.0, 3.0], [3.0, 3.0, 3.0], [3.0, 3.0, 3.0]],\n [[3.0, 3.0, 3.0], [3.0, 3.0, 3.0], [3.0, 3.0, 3.0]]]]'], {}), '([[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[1.0, \n 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0], [1.0, \n 1.0, 1.0], [1.0, 1.0, 1.0]]], [[[2.0, 2.0, 2.0], [2.0, 2.0, 2.0], [2.0,\n 2.0, 2.0]], [[2.0, 2.0, 2.0], [2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], [[2.0,\n 2.0, 
2.0], [2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]], [[[3.0, 3.0, 3.0], [3.0,\n 3.0, 3.0], [3.0, 3.0, 3.0]], [[3.0, 3.0, 3.0], [3.0, 3.0, 3.0], [3.0, \n 3.0, 3.0]], [[3.0, 3.0, 3.0], [3.0, 3.0, 3.0], [3.0, 3.0, 3.0]]]])\n', (1141, 1653), False, 'import torch\n'), ((1669, 1698), 'torch.tensor', 'torch.tensor', (['[2.0, 3.0, 4.0]'], {}), '([2.0, 3.0, 4.0])\n', (1681, 1698), False, 'import torch\n'), ((1719, 1920), 'torch.tensor', 'torch.tensor', (['[[[2.0, 4.0, 6.0], [8.0, 10.0, 12.0], [14.0, 16.0, 18.0]], [[3.0, 6.0, 9.0],\n [12.0, 15.0, 18.0], [21.0, 24.0, 27.0]], [[4.0, 8.0, 12.0], [16.0, 20.0,\n 24.0], [28.0, 32.0, 36.0]]]'], {}), '([[[2.0, 4.0, 6.0], [8.0, 10.0, 12.0], [14.0, 16.0, 18.0]], [[\n 3.0, 6.0, 9.0], [12.0, 15.0, 18.0], [21.0, 24.0, 27.0]], [[4.0, 8.0, \n 12.0], [16.0, 20.0, 24.0], [28.0, 32.0, 36.0]]])\n', (1731, 1920), False, 'import torch\n'), ((1030, 1059), 'torch.equal', 'torch.equal', (['expected', 'result'], {}), '(expected, result)\n', (1041, 1059), False, 'import torch\n'), ((2231, 2260), 'torch.equal', 'torch.equal', (['expected', 'result'], {}), '(expected, result)\n', (2242, 2260), False, 'import torch\n')] |
import time
from random import randint
from threading import Lock
from typing import List, Optional, Tuple
import telegram
from src.config import CONFIG
from src.modules.antimat.antimat import Antimat
from src.utils.cache import pure_cache, TWO_YEARS, cache, MONTH
from src.utils.callback_helpers import get_callback_data
from src.utils.logger_helpers import get_logger
from src.utils.telegram_helpers import telegram_retry, dsp
logger = get_logger(__name__)
CACHE_PREFIX = 'matshowtime'
def extend_initial_data(data: dict) -> dict:
    """Merge the module's default callback fields into *data*.

    Keys already present in *data* take precedence over the defaults.
    """
    defaults = {"name": CACHE_PREFIX, "module": CACHE_PREFIX}
    return {**defaults, **data}
def make_button(title, code_name, id, count=0) -> tuple:
    """Build a (caption, callback-payload) pair for an inline keyboard button.

    A non-zero *count* is appended to the caption (e.g. the vote tally).
    """
    caption = title if count == 0 else f'{title} {count}'
    payload = extend_initial_data({'value': code_name, 'id': id})
    return caption, payload
class TelegramWrapper:
    """Thin wrappers around telegram Bot API calls that log failures instead of raising."""

    @classmethod
    @telegram_retry(logger=logger, title=f'[{CACHE_PREFIX}] send_message')
    def send_message(cls,
                     bot: telegram.Bot,
                     text: str,
                     chat_id: int,
                     buttons=None,
                     reply_to_message_id=None) -> Optional[int]:
        """Send an HTML-formatted message to *chat_id*.

        Returns the telegram message id, or None on failure.  A 'Timed out'
        error is re-raised so the ``telegram_retry`` decorator can retry it.
        """
        reply_markup = cls.get_reply_markup(buttons)
        try:
            message = bot.send_message(
                chat_id,
                text,
                reply_markup=reply_markup,
                reply_to_message_id=reply_to_message_id,
                parse_mode=telegram.ParseMode.HTML,
                disable_web_page_preview=True,
                timeout=20)
            # cache.set(f'{CACHE_PREFIX}:messages:{chat_id}:{message.message_id}:text', message.text_html, time=USER_CACHE_EXPIRE)
            return message.message_id
        except Exception as e:
            logger.error(f"[{CACHE_PREFIX}] Can't send message to {chat_id}. Exception: {e}")
            if str(e) == 'Timed out':
                # Re-raise timeouts: the retry decorator treats them as retryable.
                raise Exception(e)
            return None

    @classmethod
    def edit_message(cls,
                     bot: telegram.Bot,
                     message_id: int,
                     text: str,
                     chat_id: int,
                     buttons=None) -> None:
        """Replace the text (and optionally the buttons) of an existing message.

        Errors are logged and swallowed.
        """
        reply_markup = cls.get_reply_markup(buttons)
        try:
            bot.edit_message_text(
                text,
                chat_id,
                message_id,
                reply_markup=reply_markup,
                parse_mode=telegram.ParseMode.HTML,
                disable_web_page_preview=True)
            # cache.set(f'{CACHE_PREFIX}:messages:{chat_id}:{message_id}:text', text, time=USER_CACHE_EXPIRE)
            # cache.set(f'{CACHE_PREFIX}:messages:{chat_id}:{message_id}:buttons', buttons, time=USER_CACHE_EXPIRE)
        except Exception as e:
            logger.error(f"[{CACHE_PREFIX}] Can't edit message from {chat_id}. Exception: {e}")

    @classmethod
    def edit_buttons(cls, bot: telegram.Bot, message_id: int, buttons, chat_id: int) -> None:
        """Replace only the inline keyboard of an existing message.

        Errors are logged and swallowed.
        """
        reply_markup = cls.get_reply_markup(buttons)
        try:
            bot.edit_message_reply_markup(chat_id, message_id, reply_markup=reply_markup,
                                          timeout=20)
            # cache.set(f'{CACHE_PREFIX}:messages:{chat_id}:{message_id}:buttons', buttons, time=USER_CACHE_EXPIRE)
        except Exception as e:
            logger.error(f"[{CACHE_PREFIX}] Can't edit buttons in {chat_id}. Exception: {e}")

    @staticmethod
    def get_reply_markup(buttons) -> Optional[telegram.InlineKeyboardMarkup]:
        """
        Inline buttons shown under a message.

        *buttons* is a list of rows; each row is a list of
        (title, callback-data) pairs produced by ``make_button``.
        Returns None when there are no buttons.
        """
        if not buttons:
            return None
        keyboard = []
        for line in buttons:
            keyboard.append([
                telegram.InlineKeyboardButton(
                    button_title,
                    callback_data=(get_callback_data(button_data)))
                for button_title, button_data in line
            ])
        return telegram.InlineKeyboardMarkup(keyboard)

    @classmethod
    def answer_callback_query_with_bot_link(cls, bot: telegram.Bot, query_id, query_data) -> None:
        """Answer a callback query by deep-linking the user to the bot with *query_data*."""
        bot.answer_callback_query(query_id, url=f"t.me/{bot.username}?start={query_data}")
class Poll:
    """Like/dislike tally for one channel message, persisted in the cache.

    Every voter id is also recorded in an ``all`` bucket, which guarantees a
    user casts at most one vote (of either kind) per message.
    """

    def __init__(self, telegram_message_id: int) -> None:
        self.key_prefix = f'{CACHE_PREFIX}:polls:likes:{telegram_message_id}'

    def get_count(self) -> Tuple[int, int]:
        """Return the current (likes, dislikes) pair."""
        likes_cast = cache.get(f'{self.key_prefix}:like', [])
        dislikes_cast = cache.get(f'{self.key_prefix}:dislike', [])
        return len(likes_cast), len(dislikes_cast)

    def like(self, uid: int) -> bool:
        """Record a like from *uid*; False when the user has already voted."""
        return self.__register('all', uid) and self.__register('like', uid)

    def dislike(self, uid: int) -> bool:
        """Record a dislike from *uid*; False when the user has already voted."""
        return self.__register('all', uid) and self.__register('dislike', uid)

    def __register(self, bucket: str, uid: int) -> bool:
        """Append *uid* to the given vote bucket unless it is already there."""
        key = f'{self.key_prefix}:{bucket}'
        voters: List[int] = cache.get(key, [])
        if uid in voters:
            return False
        cache.set(key, voters + [uid], time=MONTH)
        return True
class ChannelMessage:
    """A channel post listing new swear words, with a like/dislike poll attached.

    Instances are cached under a random 8-digit id for a month so that poll
    clicks arriving later can find and update the message.
    """
    # Serializes vote handling across callback threads.
    lock = Lock()
    callback_like = 'matshowtime_like_click'
    callback_dislike = 'matshowtime_dislike_click'

    def __init__(self, words: List[str]) -> None:
        self.words = words
        self.text: Optional[str] = None
        self.telegram_message_id: Optional[int] = None
        self.id = self.__generate_id()
        self.likes = 0
        self.dislikes = 0

    def send(self, bot: telegram.Bot) -> None:
        """Post the message to the channel and cache it for later poll updates."""
        self.text = self.__prepare_text()
        buttons = self.__get_buttons()
        self.telegram_message_id = TelegramWrapper.send_message(bot, self.text,
                                                                 matshowtime.channel_id, buttons)
        if not self.telegram_message_id:
            logger.error(f"[{CACHE_PREFIX}] Can't send message {self.id}")
            return
        self.__save()

    def __save(self):
        # The whole instance is pickled into the cache under its id.
        cache.set(self.__get_key(self.id), self, time=MONTH)

    @classmethod
    def get_msg(cls, id: int) -> Optional['ChannelMessage']:
        """Fetch a previously sent message by its id, or None if expired."""
        return cache.get(cls.__get_key(id))

    @classmethod
    def on_poll_click(cls, bot: telegram.Bot, _: telegram.Update, query: telegram.CallbackQuery,
                      data) -> None:
        """Handle a like/dislike button press on a channel message."""
        msg: ChannelMessage = cache.get(cls.__get_key(data['id']))
        if not msg:
            # Message fell out of the cache -> voting period is over.
            bot.answer_callback_query(query.id, 'Время голосования истекло')
            return
        uid = query.from_user.id
        telegram_message_id = query.message.message_id
        if msg.telegram_message_id != telegram_message_id:
            # The callback came from a message this instance doesn't own.
            bot.answer_callback_query(query.id, 'Вы сюда как попали???')
            logger.warning(f'[{CACHE_PREFIX}] msg {telegram_message_id} access {uid}')
            return
        # Vote registration and tally update must be atomic across callbacks.
        with cls.lock:
            poll = Poll(telegram_message_id)
            if data['value'] == cls.callback_like:
                voted = poll.like(uid)
                text = '👍'
            elif data['value'] == cls.callback_dislike:
                voted = poll.dislike(uid)
                text = '👎'
            else:
                bot.answer_callback_query(query.id, 'Вы сюда как попали???')
                logger.warning(f'[{CACHE_PREFIX}] msg {telegram_message_id} access {uid}')
                return
            if not voted:
                # The user has already cast a vote on this message.
                bot.answer_callback_query(query.id, 'Только один раз')
                return
            likes, dislikes = poll.get_count()
            msg.likes = likes
            msg.dislikes = dislikes
        # The slow telegram call is dispatched outside the lock.
        dsp(cls.__update_buttons_and_answer, bot, msg, query, text)

    @classmethod
    def __update_buttons_and_answer(cls, bot, msg, query, text):
        """Refresh the button counters and acknowledge the click; timed for logging."""
        start_time = time.time()
        msg.update_buttons(bot)
        try:
            bot.answer_callback_query(query.id, text)
        except Exception:
            # The query may have expired by now; acknowledging is best-effort.
            pass
        elapsed_time = time.time() - start_time
        logger.info(f'update buttons finished in {int(elapsed_time * 1000)} ms')

    def __prepare_text(self) -> str:
        # Words are shown upper-cased and bold.
        upper_words = ', '.join(self.words).upper()
        return f'<b>{upper_words}</b>'

    def __get_buttons(self):
        # One row: like button, dislike button, each carrying the tally.
        like = make_button('👍', self.callback_like, self.id, self.likes)
        dislike = make_button('👎', self.callback_dislike, self.id, self.dislikes)
        buttons = [
            [like, dislike],
        ]
        return buttons

    @staticmethod
    def __get_key(id: int) -> str:
        return f'{CACHE_PREFIX}:messages:{id}'

    def __generate_id(self) -> int:
        """Pick a random 8-digit id not currently present in the cache."""
        digits = 8
        for count in range(0, 1000):
            range_start = 10 ** (digits - 1)
            range_end = (10 ** digits) - 1
            id = randint(range_start, range_end)
            # make sure the id is unique
            if not cache.get(self.__get_key(id)):
                return id
        raise Exception(f"[{CACHE_PREFIX}] Can't generate id")

    def update_buttons(self, bot: telegram.Bot) -> None:
        """Re-save the instance and push the refreshed vote counters to telegram."""
        self.__save()
        buttons = self.__get_buttons()
        if self.telegram_message_id is None:
            logger.error(f"Can't edit buttons")
            return
        TelegramWrapper.edit_buttons(bot, self.telegram_message_id, buttons, matshowtime.channel_id)
class Matshowtime:
    """Publishes newly encountered swear words to the configured Telegram channel.

    Published words are remembered in the cache so each word is announced
    at most once.
    """
    cache_key_words = f'{CACHE_PREFIX}:words'

    def __init__(self):
        # The channel may be absent from the config; ``send`` is a no-op then.
        self.channel_id = CONFIG.get('matshowtime', {}).get('channel_id', None)

    def send(self, bot: telegram.Bot, mat_words: List[str]) -> None:
        """Post to the channel the words from *mat_words* never published before."""
        # Nothing to do without words or without a configured channel.
        if not mat_words or not self.channel_id:
            return
        fresh_words = self.__only_new_words(mat_words)
        if not fresh_words:
            return
        self.__save_words(fresh_words)
        self.__send_to_channel(bot, fresh_words)

    @staticmethod
    def __send_to_channel(bot: telegram.Bot, words: List[str]) -> None:
        ChannelMessage(words).send(bot)

    def __only_new_words(self, words: List[str]) -> List[str]:
        """Return the lower-cased subset of *words* not seen before."""
        already_used = pure_cache.get_set(self.cache_key_words)
        return list({word.lower() for word in words} - already_used)

    def __save_words(self, new_words: List[str]) -> None:
        """Remember *new_words* so they are never published again."""
        pure_cache.add_to_set(self.cache_key_words, new_words, time=TWO_YEARS)
class MatshowtimeHandlers:
    """Telegram command and callback entry points for the matshowtime module."""

    # Maps callback-data values to their handlers.
    callbacks = {
        ChannelMessage.callback_like: ChannelMessage.on_poll_click,
        ChannelMessage.callback_dislike: ChannelMessage.on_poll_click,
    }

    @classmethod
    def cmd_mats(cls, bot: telegram.Bot, update: telegram.Update) -> None:
        """Handle /mats: extract swear words from the argument and publish them."""
        uid = update.message.from_user.id
        # Only the bot admin may use this command.
        if uid != CONFIG.get('debug_uid', None):
            return
        # Everything after "/mats " is the text to scan.
        text = update.message.text.partition(' ')[2].strip()
        if not text:
            return
        mat_words = [word.lower() for word in Antimat.bad_words(text)]
        if not mat_words:
            return
        matshowtime.send(bot, mat_words)

    @classmethod
    def callback_handler(cls, bot: telegram.Bot, update: telegram.Update,
                         query: telegram.CallbackQuery, data) -> None:
        """Dispatch an inline-button callback belonging to this module."""
        if data.get('module') != CACHE_PREFIX:
            return
        handler = cls.callbacks.get(data['value'])
        if handler is not None:
            handler(bot, update, query, data)
# Module-level singleton used by ChannelMessage and the command handlers.
matshowtime = Matshowtime()
| [
"src.utils.cache.pure_cache.add_to_set",
"random.randint",
"src.utils.cache.cache.get",
"src.utils.cache.pure_cache.get_set",
"telegram.InlineKeyboardMarkup",
"threading.Lock",
"src.utils.callback_helpers.get_callback_data",
"src.modules.antimat.antimat.Antimat.bad_words",
"src.utils.telegram_helper... | [((441, 461), 'src.utils.logger_helpers.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (451, 461), False, 'from src.utils.logger_helpers import get_logger\n'), ((896, 965), 'src.utils.telegram_helpers.telegram_retry', 'telegram_retry', ([], {'logger': 'logger', 'title': 'f"""[{CACHE_PREFIX}] send_message"""'}), "(logger=logger, title=f'[{CACHE_PREFIX}] send_message')\n", (910, 965), False, 'from src.utils.telegram_helpers import telegram_retry, dsp\n'), ((5212, 5218), 'threading.Lock', 'Lock', ([], {}), '()\n', (5216, 5218), False, 'from threading import Lock\n'), ((3957, 3996), 'telegram.InlineKeyboardMarkup', 'telegram.InlineKeyboardMarkup', (['keyboard'], {}), '(keyboard)\n', (3986, 3996), False, 'import telegram\n'), ((5023, 5041), 'src.utils.cache.cache.get', 'cache.get', (['key', '[]'], {}), '(key, [])\n', (5032, 5041), False, 'from src.utils.cache import pure_cache, TWO_YEARS, cache, MONTH\n'), ((5124, 5156), 'src.utils.cache.cache.set', 'cache.set', (['key', 'uids'], {'time': 'MONTH'}), '(key, uids, time=MONTH)\n', (5133, 5156), False, 'from src.utils.cache import pure_cache, TWO_YEARS, cache, MONTH\n'), ((7646, 7705), 'src.utils.telegram_helpers.dsp', 'dsp', (['cls.__update_buttons_and_answer', 'bot', 'msg', 'query', 'text'], {}), '(cls.__update_buttons_and_answer, bot, msg, query, text)\n', (7649, 7705), False, 'from src.utils.telegram_helpers import telegram_retry, dsp\n'), ((7810, 7821), 'time.time', 'time.time', ([], {}), '()\n', (7819, 7821), False, 'import time\n'), ((10266, 10306), 'src.utils.cache.pure_cache.get_set', 'pure_cache.get_set', (['self.cache_key_words'], {}), '(self.cache_key_words)\n', (10284, 10306), False, 'from src.utils.cache import pure_cache, TWO_YEARS, cache, MONTH\n'), ((10465, 10535), 'src.utils.cache.pure_cache.add_to_set', 'pure_cache.add_to_set', (['self.cache_key_words', 'new_words'], {'time': 'TWO_YEARS'}), '(self.cache_key_words, new_words, time=TWO_YEARS)\n', (10486, 
10535), False, 'from src.utils.cache import pure_cache, TWO_YEARS, cache, MONTH\n'), ((4420, 4460), 'src.utils.cache.cache.get', 'cache.get', (['f"""{self.key_prefix}:like"""', '[]'], {}), "(f'{self.key_prefix}:like', [])\n", (4429, 4460), False, 'from src.utils.cache import pure_cache, TWO_YEARS, cache, MONTH\n'), ((4485, 4528), 'src.utils.cache.cache.get', 'cache.get', (['f"""{self.key_prefix}:dislike"""', '[]'], {}), "(f'{self.key_prefix}:dislike', [])\n", (4494, 4528), False, 'from src.utils.cache import pure_cache, TWO_YEARS, cache, MONTH\n'), ((7987, 7998), 'time.time', 'time.time', ([], {}), '()\n', (7996, 7998), False, 'import time\n'), ((8789, 8820), 'random.randint', 'randint', (['range_start', 'range_end'], {}), '(range_start, range_end)\n', (8796, 8820), False, 'from random import randint\n'), ((10936, 10965), 'src.config.CONFIG.get', 'CONFIG.get', (['"""debug_uid"""', 'None'], {}), "('debug_uid', None)\n", (10946, 10965), False, 'from src.config import CONFIG\n'), ((9450, 9479), 'src.config.CONFIG.get', 'CONFIG.get', (['"""matshowtime"""', '{}'], {}), "('matshowtime', {})\n", (9460, 9479), False, 'from src.config import CONFIG\n'), ((11220, 11243), 'src.modules.antimat.antimat.Antimat.bad_words', 'Antimat.bad_words', (['text'], {}), '(text)\n', (11237, 11243), False, 'from src.modules.antimat.antimat import Antimat\n'), ((3840, 3870), 'src.utils.callback_helpers.get_callback_data', 'get_callback_data', (['button_data'], {}), '(button_data)\n', (3857, 3870), False, 'from src.utils.callback_helpers import get_callback_data\n')] |
from unittest import mock
import gym
import numpy as np
from tests.fixtures.envs.dummy import DummyEnv
class DummyDiscretePixelEnv(DummyEnv):
    """
    A dummy discrete pixel environment.

    It follows Atari game convention, where actions are 'NOOP', 'FIRE', ...
    It also contains self.unwrapped.ale.lives, get_action_meanings for testing.

    Several properties are made for testing purpose as following:

    -Observations are
        after reset    : np.ones(self._shape).
        action 1 (FIRE): np.full(self._shape, 2).
        otherwise      : random if self.random is True,
            otherwise previous state + action.

    -The environment has 5 lives.

    -Done will be True if
        -all 5 lives are exhausted
        -env.step(2), followed by env.step(1)
    """

    def __init__(self, random=True):
        # 10x10 RGB observations, 5 discrete actions (see _get_action_meanings).
        super().__init__(random, obs_dim=(10, 10, 3), action_dim=5)
        self.unwrapped.get_action_meanings = self._get_action_meanings
        # Mimic the Atari ALE interface so wrappers can query remaining lives.
        self.unwrapped.ale = mock.Mock()
        self.unwrapped.ale.lives = self.get_lives
        self._observation_space = gym.spaces.Box(
            low=0, high=255, shape=self._obs_dim, dtype=np.uint8)
        self.step_called = 0
        self._prev_action = None

    @property
    def observation_space(self):
        """Return an observation space."""
        return self._observation_space

    @observation_space.setter
    def observation_space(self, observation_space):
        # Allows tests/wrappers to override the observation space.
        self._observation_space = observation_space

    @property
    def action_space(self):
        """Return an action space."""
        return gym.spaces.Discrete(self._action_dim)

    def _get_action_meanings(self):
        # Atari-style action names; index 1 ('FIRE') is special-cased in step().
        return ['NOOP', 'FIRE', 'SLEEP', 'EAT', 'PLAY']

    def get_lives(self):
        """Get number of lives."""
        return self._lives

    def reset(self):
        """Reset the environment: all-ones observation, 5 lives, step counter to 0."""
        self.state = np.ones(self._obs_dim, dtype=np.uint8)
        self._lives = 5
        self.step_called = 0
        return self.state

    def step(self, action):
        """
        Step the environment.

        Before gym fixed overflow issue for sample() in
        np.uint8 environment, we will handle the sampling here.
        We need high=256 since np.random.uniform sample from [low, high)
        (includes low, but excludes high).

        Returns (obs, reward, done, info); reward is always 0 and info
        carries the remaining lives under 'ale.lives'.
        Raises RuntimeError when called before reset() or with no lives left.
        """
        done = False
        if self.state is not None:
            # Simulating FIRE action
            if action == 1:
                # FIRE right after SLEEP (action 2) ends the episode.
                if self._prev_action == 2:
                    done = True
                obs = np.full(self._obs_dim, 2, dtype=np.uint8)
            else:
                if self.random:
                    obs = np.random.uniform(
                        low=0, high=256, size=self._obs_dim).astype(np.uint8)
                else:
                    # NOTE(review): self.state is not updated per step, so this is
                    # always the reset observation plus the action value.
                    obs = self.state + action
            if self._lives == 0:
                raise RuntimeError("DummyEnv: Cannot step when lives = 0!")
            # Every step consumes one life; the episode ends on the last one.
            self._lives -= 1
            if self._lives == 0:
                done = True
        else:
            raise RuntimeError(
                "DummyEnv: reset() must be called before step()!")
        self.step_called += 1
        self._prev_action = action
        return obs, 0, done, {'ale.lives': self._lives}
| [
"unittest.mock.Mock",
"numpy.ones",
"gym.spaces.Discrete",
"gym.spaces.Box",
"numpy.random.uniform",
"numpy.full"
] | [((1029, 1040), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1038, 1040), False, 'from unittest import mock\n'), ((1127, 1195), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': 'self._obs_dim', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=self._obs_dim, dtype=np.uint8)\n', (1141, 1195), False, 'import gym\n'), ((1649, 1686), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['self._action_dim'], {}), '(self._action_dim)\n', (1668, 1686), False, 'import gym\n'), ((1959, 1997), 'numpy.ones', 'np.ones', (['self._obs_dim'], {'dtype': 'np.uint8'}), '(self._obs_dim, dtype=np.uint8)\n', (1966, 1997), True, 'import numpy as np\n'), ((2635, 2676), 'numpy.full', 'np.full', (['self._obs_dim', '(2)'], {'dtype': 'np.uint8'}), '(self._obs_dim, 2, dtype=np.uint8)\n', (2642, 2676), True, 'import numpy as np\n'), ((2756, 2810), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(256)', 'size': 'self._obs_dim'}), '(low=0, high=256, size=self._obs_dim)\n', (2773, 2810), True, 'import numpy as np\n')] |
from openpyxl import load_workbook
from extractor import read_agenda
# setting up testing files
# Workbook fixtures shared by the tests below. data_only=True makes
# openpyxl return the cached values Excel stored for formula cells
# rather than the formula strings themselves.
wb1 = load_workbook('test_example_1.xlsx', data_only=True)
worksheet_1 = wb1.worksheets[0]
wb2 = load_workbook('test_example_2.xlsx', data_only=True)
worksheet_2 = wb2.worksheets[0]
wb3 = load_workbook('test_example_3.xlsx', data_only=True)
worksheet_3 = wb3.worksheets[0]
class TestReadAgendaEventsCount:
    """Verify that read_agenda() extracts every event from each workbook."""

    def _event_count(self, sheet):
        """Return how many events read_agenda() extracted from *sheet*."""
        return len(read_agenda(sheet))

    def test_worksheet_1(self):
        assert self._event_count(worksheet_1) == 16

    def test_worksheet_2(self):
        assert self._event_count(worksheet_2) == 15

    def test_worksheet_3(self):
        assert self._event_count(worksheet_3) == 18
| [
"openpyxl.load_workbook",
"extractor.read_agenda"
] | [((105, 157), 'openpyxl.load_workbook', 'load_workbook', (['"""test_example_1.xlsx"""'], {'data_only': '(True)'}), "('test_example_1.xlsx', data_only=True)\n", (118, 157), False, 'from openpyxl import load_workbook\n'), ((197, 249), 'openpyxl.load_workbook', 'load_workbook', (['"""test_example_2.xlsx"""'], {'data_only': '(True)'}), "('test_example_2.xlsx', data_only=True)\n", (210, 249), False, 'from openpyxl import load_workbook\n'), ((289, 341), 'openpyxl.load_workbook', 'load_workbook', (['"""test_example_3.xlsx"""'], {'data_only': '(True)'}), "('test_example_3.xlsx', data_only=True)\n", (302, 341), False, 'from openpyxl import load_workbook\n'), ((533, 557), 'extractor.read_agenda', 'read_agenda', (['worksheet_1'], {}), '(worksheet_1)\n', (544, 557), False, 'from extractor import read_agenda\n'), ((617, 641), 'extractor.read_agenda', 'read_agenda', (['worksheet_2'], {}), '(worksheet_2)\n', (628, 641), False, 'from extractor import read_agenda\n'), ((701, 725), 'extractor.read_agenda', 'read_agenda', (['worksheet_3'], {}), '(worksheet_3)\n', (712, 725), False, 'from extractor import read_agenda\n')] |
import os
import requests
import string
import xml.etree.ElementTree as xml
from os.path import join as j
from time import sleep
from virfac import data_dir
# Pathogenic bacterial strains, one organism name per line; split into a list.
# NOTE(review): '<NAME>' below (Rickettsia rickettsii) looks like a redaction
# placeholder — restore the real strain name.
bad_bugs = """Acinetobacter baumannii ACICU
Aeromonas hydrophila subsp. hydrophila ATCC 7966
Aeromonas salmonicida subsp. salmonicida A449
Aeromonas hydrophila ML09-119
Aeromonas veronii B565
Aeromonas hydrophila str. AH-3
Anaplasma phagocytophilum HZ
Bacillus anthracis str. Sterne
Bacillus anthracis
Bacillus cereus ATCC 10987
Bacillus cereus ATCC 14579
Bacillus anthracis str. Ames Ancestor
Bacillus subtilis subsp. subtilis str. 168
Bartonella henselae str. Houston-1
Bartonella quintana str. Toulouse
Bordetella pertussis Tohama I
Brucella melitensis bv. 1 str. 16M
Brucella suis 1330
Burkholderia pseudomallei K96243
Campylobacter jejuni subsp. jejuni NCTC 11168
Campylobacter jejuni subsp. jejuni 81-176
Chlamydia trachomatis D/UW-3/CX
Clostridium perfringens str. 13
Clostridium perfringens ATCC 13124
Clostridium perfringens SM101
Clostridium difficile 630
Clostridium tetani E88
Clostridium perfringens B str. ATCC 3626
Clostridium perfringens str. NCTC 8533B4D
Clostridium perfringens E str. NCIB 10748
Clostridium botulinum C str. 203U28
Clostridium botulinum D str. 1873
Clostridium botulinum Hall 183 (A391)
Clostridium difficile str. CCUG 20309
Clostridium novyi str. ATCC19402
Clostridium septicum str. NH2
Corynebacterium diphtheriae NCTC 13129
Coxiella burnetii CbuK_Q154
Coxiella burnetii RSA 493
Coxiella burnetii RSA 331
Coxiella burnetii CbuG_Q212
Coxiella burnetii Dugway 5J108-111
Enterococcus faecalis V583
Enterococcus faecalis str. MMH594
Enterococcus faecium DO
Enterococcus faecium str. TX2555
Escherichia coli O127:H6 str. E2348/69
Escherichia coli B171
Escherichia coli O157:H7 str. EDL933
Escherichia coli 17-2
Escherichia coli str. 042
Escherichia coli 55989
Escherichia coli O44:H18 042
Escherichia coli CFT073
Escherichia coli O75:K5:H- str. IH11128
Escherichia coli EC7372
Escherichia coli str. A30
Escherichia coli O25b:H4 str. FV9863
Escherichia coli str. C1845
Escherichia coli
Escherichia coli E10703
Escherichia coli O114:H49 str. E29101A
Escherichia coli O159:H4 str. 350C1
Escherichia coli str. E7473
Escherichia coli str. 260-1
Escherichia coli str. ARG-3
Escherichia coli O8:H9 str. WS6788A
Escherichia coli str. H721A
Escherichia coli O18:K1:H7 str. RS218
Escherichia coli UTI89
Escherichia coli str. E-B35
Escherichia coli O111:H- str. E45035
Escherichia coli O78:H11:K80 str. H10407
Escherichia coli O157:H7 str. Sakai
Escherichia coli O55:H7 str. CB9615
Escherichia coli O26 str. C/15333
Escherichia coli ONT:H- str. FV11678
Escherichia coli str. A22
Escherichia coli str. AL 851
Escherichia coli str. 239 KH 89
Escherichia coli O25:H42 str. E11881A
Escherichia coli O114:H- str. WS0115A
Escherichia coli str. 111KH86
Escherichia coli SE11
Escherichia coli O157:H str. 493/89
Escherichia coli ONT:HND str. A16
Escherichia coli C342-62
Escherichia coli O45:K1:H7 str. S88
Haemophilus influenzae Rd KW20
Haemophilus influenzae str. 1007
Haemophilus influenzae AM30 (770235)
Haemophilus influenzae N187
Haemophilus influenzae nontypable strain 3179B
Haemophilus influenzae str. 12
Haemophilus influenzae C54
Haemophilus influenzae TN106
Helicobacter pylori 26695
Helicobacter pylori J99
Klebsiella pneumoniae subsp. pneumoniae NTUH-K2044
Klebsiella pneumoniae subsp. pneumoniae HS11286
Klebsiella pneumoniae subsp. pneumoniae 1084
Legionella pneumophila subsp. pneumophila str. Philadelphia 1
Listeria monocytogenes EGD-e
Listeria ivanovii str. ATCC 19119
Listeria innocua SLCC6294
Mycobacterium tuberculosis H37Rv
Mycoplasma pneumoniae M129
Mycoplasma hyopneumoniae 232
Neisseria meningitidis MC58
Neisseria meningitidis Z2491
Pseudomonas aeruginosa PAO1
Pseudomonas aeruginosa PA103
Rickettsia rickettsii str. <NAME>
Rickettsia conorii str. Malish 7
Salmonella enterica subsp. enterica serovar Typhi str. CT18
Salmonella enterica subsp. enterica serovar Typhimurium str. LT2
Salmonella enterica (serovar typhimurium)
Salmonella enterica subsp. enterica serovar Typhimurium str. 14028s
Shigella flexneri 2a str. 301
Shigella dysenteriae Sd197
Staphylococcus aureus subsp. aureus MW2
Staphylococcus aureus subsp. aureus str. Newman
Staphylococcus aureus subsp. aureus COL
Staphylococcus aureus str. Newman D2C (ATCC 25904)
Staphylococcus aureus ZM
Staphylococcus aureus
Staphylococcus aureus S6
Staphylococcus aureus RN4220
Staphylococcus aureus subsp. aureus N315
Streptococcus pyogenes MGAS315
Streptococcus pyogenes M1 GAS
Streptococcus pyogenes MGAS8232
Streptococcus agalactiae 2603V/R
Streptococcus agalactiae A909
Streptococcus pneumoniae TIGR4
Streptococcus pneumoniae R6
Streptococcus agalactiae FM027022
Streptococcus agalactiae NEM316
Streptococcus pyogenes MGAS5005
Streptococcus pneumoniae Taiwan19F-14
Vibrio cholerae O1 biovar El Tor str. N16961
Vibrio parahaemolyticus RIMD 2210633
Vibrio vulnificus CMCP6
Vibrio parahaemolyticus
Vibrio vulnificus YJ016
Yersinia pestis CO92
Yersinia enterocolitica W1024
Yersinia enterocolitica str. 84-50
Yersinia enterocolitica subsp. enterocolitica 8081
Yersinia pestis KIM 10""".split('\n')
# Second strain list, one organism name per line; presumably a
# non-pathogenic/control set given the name — verify against callers.
good_bugs = """Actinomyces dentalis
Actinomyces israelii
Actinomyces oricola
Actinomyces oris
Aerococcus viridans
Aerococcus urinae
Arthrobacter agilis
Aerococcus sanguicola
Aerococcus viridans
Arcanobacterium bernardiae
Arthrobacter agilis
Bacillus algicola
Bacillus barbaricus
Bacillus firmus
Bacillus funiculus
Bacillus gibsonii
Bacillus horikoshii
Bacillus niacini
Bacillus pasteurii
Bacillus subtilis inaquosorum
Brevibacillus brevis
Brevibacterium epidermidis
Brevibacterium oxydans
Burkholderia_thailandensis_E264_uid58081
Burkholderia_cenocepacia_MC0_3_uid58769
Burkholderia_phymatum_STM815_uid58699
Burkholderia_CCGE1001_uid42975
Cellulomonas hominis
Cellulomonas turbata
Corynebacterium acnes
Corynebacterium caspium
Corynebacterium flavescens
Corynebacterium genitalium
Corynebacterium imitans
Corynebacterium striatum
Corynebacterium variabilis
Corynebacterium xerosis
Dermabacter hominis
Dermacoccus nishinomiyaensis
Exiguobacterium acetylicum
Flavobacterium arborescens
Flavobacterium maritypicum
Gordonia bronchialis
Escherichia coli str. K-12 substr. MG1655
Escherichia_coli_ED1a_uid59379
Escherichia_coli_SE11_uid59425
Escherichia_fergusonii_ATCC_35469_uid59375
Escherichia_coli_HS_uid58393
Escherichia_coli__BL21_GOLDd_DE3_pLysS_AG__uid59245
Escherichia_coli_IAI1_uid59377
Escherichia_coli_B_REL606_uid58803
Escherichia_coli_K_12_substr__DH10B_uid58979
Exiguobacterium acetylicum
Flavobacterium arborescens
Flavobacterium maritypicum
Gordonia bronchialis
Leifsonia aquatica
Leifsonia xyli
Micrococcus glutamicus
Micrococcus nishinomiyaensis
Micrococcus sedentarius
Pseudomonas_fluorescens_Pf_5_uid57937
Pseudomonas_putida_GB_1_uid58735
Pseudomonas_putida_KT2440_uid57843
Pseudomonas_fluorescens_SBW25_uid62971
Pseudomonas_putida_W619_uid58651
Pseudomonas_brassicacearum_NFM421_uid66303
Pseudomonas_stutzeri_A1501_uid58641
Rhodococcus bronchialis
Rhodococcus erythropolis
Rhodococcus terrae
Staphylococcus albus
Staphylococcus auricularis
Staphylococcus capitis
Staphylococcus epidermidis
Staphylococcus hominis
Staphylococcus nepalensis
Staphylococcus saprophyticus
Staphylococcus vitulinus
Staphylococcus warneri
Streptococcus australis
Streptococcus caprinus
Streptococcus crista
Streptococcus entericus
Streptococcus gordonii
Streptococcus infantarius
Streptococcus mitis
Streptococcus mutans ferus
Streptococcus oralis
Streptococcus salivarius
Streptococcus sanguis
Streptococcus vestibularis
Streptococcus viridans
Tsukamurella inchonensis
Tsukamurella paurometabola
Tsukamurella pulmonis
Virgibacillus pantothenticus""".split('\n')
# NCBI Entrez E-utilities endpoint URLs used by get_data_from_ncbi().
esearch = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi"
efetch = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi"
elink = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi"
# NOTE(review): hard-coded NCBI API key committed to source — prefer reading
# it from an environment variable and rotating this key.
api_key = '39bc94a6bd1a989fdaacde696739255d7709'
#api_key = None
def get_data_from_ncbi(bug_list=bad_bugs, subdir="pathogens", esearch=esearch, efetch=efetch, elink=elink, api_key=api_key):
    """Download a small FASTA sample for each organism in *bug_list*.

    For every organism name:
      1. esearch -> assembly ids matching the name,
      2. elink   -> linked nuccore (sequence) ids for each assembly,
      3. efetch  -> FASTA record, of which only the first ~5 KB is kept.

    Output goes to ``<data_dir>/<subdir>/<normalized_name>.fasta``.

    :param bug_list: iterable of organism names to look up
    :param subdir: sub-directory of ``data_dir`` for the output files
    :param esearch: NCBI esearch endpoint URL
    :param efetch: NCBI efetch endpoint URL
    :param elink: NCBI elink endpoint URL
    :param api_key: NCBI API key (raises the rate limit); may be None
    """
    for bug in bug_list:
        os.makedirs(j(data_dir, subdir), exist_ok=True)
        # Normalize the organism name into a filesystem-safe file name.
        filename = j(data_dir, subdir, bug.lower().translate(str.maketrans('', '', string.punctuation)).replace(' ','_').strip() + '.fasta')
        res = requests.get(esearch, params=dict(tool='hackathon2019', db='assembly', term=bug, retmax=1000, api_key=api_key))
        res.raise_for_status()
        try:
            rec = xml.fromstring(res.content)
        except xml.ParseError:
            print(res.content)
            break
        ids = [i.text for i in rec.findall('.//IdList/Id')]
        print(f'found {len(ids)} genomes for {bug} ...')
        for gb_id in ids:
            res = requests.get(elink, params=dict(tool='hackathon2019', db='nuccore', dbfrom='assembly', Id=gb_id, retmode='xml', api_key=api_key))
            try:
                rec = xml.fromstring(res.content)
            except xml.ParseError:
                print(res.content)
                break
            for fa_id in [e.text for e in rec.findall(r".//*[DbTo='nuccore']//Id")]:
                # stream=True is the fix: without it requests downloads the
                # entire record before iter_content() yields anything, which
                # defeats the 5000-byte early break below.
                res = requests.get(efetch, params=dict(tool='hackathon2019', db='nuccore', Id=fa_id, rettype='fasta', api_key=api_key), stream=True)
                try:
                    with open(filename, 'wb') as fd:
                        for i, chunk in enumerate(res.iter_content(chunk_size=128)):
                            fd.write(chunk)
                            print(f"{i*128} bytes read.")
                            if i*128 > 5000:
                                break  # just need the first ~5 KB
                finally:
                    # Release the half-read streaming connection.
                    res.close()
                break  # only the first nuccore record per assembly is needed
if __name__ == '__main__':
get_data_from_ncbi() | [
"xml.etree.ElementTree.fromstring",
"os.path.join"
] | [((8162, 8181), 'os.path.join', 'j', (['data_dir', 'subdir'], {}), '(data_dir, subdir)\n', (8163, 8181), True, 'from os.path import join as j\n'), ((8494, 8521), 'xml.etree.ElementTree.fromstring', 'xml.fromstring', (['res.content'], {}), '(res.content)\n', (8508, 8521), True, 'import xml.etree.ElementTree as xml\n'), ((8863, 8890), 'xml.etree.ElementTree.fromstring', 'xml.fromstring', (['res.content'], {}), '(res.content)\n', (8877, 8890), True, 'import xml.etree.ElementTree as xml\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import django
from django.core import signals
from django.core.cache.backends.base import BaseCache
# Install a default handler so warnings from this module are not dropped
# when it is used outside a fully configured logging setup.
logging.basicConfig()
logger = logging.getLogger(__name__)
def get_cache(backend, **kwargs):
    """Instantiate a Django cache for *backend*, across Django versions.

    Django's cache-creation API moved several times, so dispatch on the
    installed version: the public ``get_cache`` up to 1.6, the connection
    factory from 3.2 on, and the private helper for 1.7-3.1.
    """
    from django.core import cache as dj_cache

    version = django.VERSION
    if version <= (1, 6):
        created = dj_cache.get_cache(backend, **kwargs)
    elif version >= (3, 2):
        created = dj_cache.caches.create_connection(backend)
    else:
        # Django 1.7 to 3.1 only exposes the private factory.
        created = dj_cache._create_cache(backend, **kwargs)

    # Some caches -- python-memcached in particular -- need to do a cleanup
    # at the end of a request cycle. If a backend has no such need, close()
    # is either absent (older Django) or a no-op.
    if hasattr(created, "close"):
        signals.request_finished.connect(created.close)
    return created
class FallbackCache(BaseCache):
    """Cache backend that proxies to "main_cache" and, when any operation
    on it raises, transparently retries the same call on "fallback_cache"."""
    # Lazily-populated backend handles (set in __init__).
    _cache = None
    _cache_fallback = None
    def __init__(self, params=None, *args, **kwargs):
        # `params` is accepted for Django's backend-construction signature
        # but is intentionally unused here.
        BaseCache.__init__(self, *args, **kwargs)
        self._cache = get_cache("main_cache")
        self._cache_fallback = get_cache("fallback_cache")
    def add(self, key, value, timeout=None, version=None):
        """Store *key* only if not already present; falls back on error."""
        return self._call_with_fallback(
            "add", key, value, timeout=timeout, version=version
        )
    def get(self, key, default=None, version=None):
        """Fetch *key*, returning *default* on a miss; falls back on error."""
        return self._call_with_fallback("get", key, default=default, version=version)
    def set(self, key, value, timeout=None, version=None, client=None):
        """Store *key*; falls back on error.

        NOTE(review): the *client* argument is accepted but never forwarded
        to the underlying backend -- confirm whether that is intentional.
        """
        return self._call_with_fallback(
            "set", key, value, timeout=timeout, version=version
        )
    def delete(self, key, version=None):
        """Remove *key*; falls back on error."""
        return self._call_with_fallback("delete", key, version=version)
    def clear(self):
        """Flush the whole cache; falls back on error."""
        return self._call_with_fallback("clear")
    def _call_with_fallback(self, method, *args, **kwargs):
        # Try the main backend first; on ANY exception log it and replay the
        # call against the fallback backend instead.
        try:
            return self._call_main_cache(args, kwargs, method)
        except Exception as e:
            logger.warning("Switch to fallback cache")
            logger.exception(e)
            return self._call_fallback_cache(args, kwargs, method)
    def _call_main_cache(self, args, kwargs, method):
        # Dynamic dispatch of *method* on the main backend.
        return getattr(self._cache, method)(*args, **kwargs)
    def _call_fallback_cache(self, args, kwargs, method):
        # Dynamic dispatch of *method* on the fallback backend.
        return getattr(self._cache_fallback, method)(*args, **kwargs)
| [
"logging.basicConfig",
"logging.getLogger",
"django.core.cache.backends.base.BaseCache.__init__",
"django.core.cache.get_cache",
"django.core.cache._create_cache",
"django.core.signals.request_finished.connect",
"django.core.cache.caches.create_connection"
] | [((181, 202), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (200, 202), False, 'import logging\n'), ((212, 239), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (229, 239), False, 'import logging\n'), ((372, 409), 'django.core.cache.get_cache', 'dj_cache.get_cache', (['backend'], {}), '(backend, **kwargs)\n', (390, 409), True, 'from django.core import cache as dj_cache\n'), ((848, 893), 'django.core.signals.request_finished.connect', 'signals.request_finished.connect', (['cache.close'], {}), '(cache.close)\n', (880, 893), False, 'from django.core import signals\n'), ((1053, 1094), 'django.core.cache.backends.base.BaseCache.__init__', 'BaseCache.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (1071, 1094), False, 'from django.core.cache.backends.base import BaseCache\n'), ((461, 503), 'django.core.cache.caches.create_connection', 'dj_cache.caches.create_connection', (['backend'], {}), '(backend)\n', (494, 503), True, 'from django.core import cache as dj_cache\n'), ((551, 592), 'django.core.cache._create_cache', 'dj_cache._create_cache', (['backend'], {}), '(backend, **kwargs)\n', (573, 592), True, 'from django.core import cache as dj_cache\n')] |
import torch
import numpy as np
from ctc_decoders import Scorer, ctc_beam_search_decoder_batch
"""
# 安装语言模型
sudo apt-get install build-essential libboost-all-dev cmake zlib1g-dev libbz2-dev liblzma-dev
git clone https://github.com/NVIDIA/OpenSeq2Seq -b ctc-decoders
mv OpenSeq2Seq/decoders .
rm -rf OpenSeq2Seq
cd decoders
./setup.sh
cd ..
"""
class BeamSearchDecoderWithLM(torch.nn.Module):
    """CTC beam-search decoder with an optional KenLM language model."""

    def __init__(
            self, vocab, beam_width, alpha, beta, lm_path, num_cpus, cutoff_prob=1.0, cutoff_top_n=40):
        """
        :param vocab: list of output symbols (one per CTC class)
        :param beam_width: number of beams kept during the search
        :param alpha: language-model weight
        :param beta: word-insertion weight
        :param lm_path: path to a KenLM model, or None to decode without one
        :param num_cpus: worker processes for the batched decoder
        :param cutoff_prob: cumulative-probability pruning threshold
        :param cutoff_top_n: keep at most this many classes per frame
        """
        # Fix: torch.nn.Module subclasses must initialize the base class,
        # otherwise module machinery (e.g. __call__) is broken.
        super().__init__()
        if lm_path is not None:
            self.scorer = Scorer(alpha, beta, model_path=lm_path, vocabulary=vocab)
        else:
            self.scorer = None
        self.vocab = vocab
        self.beam_width = beam_width
        self.num_cpus = num_cpus
        self.cutoff_prob = cutoff_prob
        self.cutoff_top_n = cutoff_top_n

    @torch.no_grad()
    def forward(self, log_probs, log_probs_length):
        """Decode a batch of per-frame log-probabilities into strings.

        :param log_probs: array of shape (batch, time, classes)
        :param log_probs_length: valid frame count for each batch item
        :return: list of best-hypothesis transcripts, one per batch item
        """
        probs = self.revert_softmax(log_probs)
        # Trim padding frames before handing each item to the decoder.
        probs_list = [prob[: log_probs_length[i], :] for i, prob in enumerate(probs)]
        results = ctc_beam_search_decoder_batch(
            probs_list,
            self.vocab,
            beam_size=self.beam_width,
            num_processes=self.num_cpus,
            ext_scoring_func=self.scorer,
            cutoff_prob=self.cutoff_prob,
            cutoff_top_n=self.cutoff_top_n,
        )
        # Each result is a score-sorted list of (score, text); keep the best text.
        return [item[0][1] for item in results]

    def revert_softmax(self, logits):
        """Recover softmax probabilities from log-probabilities.

        The LM scorer needs plain probabilities; subtracting the per-row
        maximum before exponentiating keeps the computation numerically
        stable. Vectorized over all leading dimensions.
        """
        e = np.exp(logits - np.max(logits, axis=-1, keepdims=True))
        return e / e.sum(axis=-1, keepdims=True)
if __name__ == '__main__':
    # Smoke test: decode a random batch with a KenLM-backed decoder.
    vocab = [c.strip() for c in open("data/aishell1-vocab.txt", 'r').readlines()]
    lm_path = "/data/chenc/asr/minhang/atc-service/asr/checkpoints/kenlm/cn.arpa"
    decoder = BeamSearchDecoderWithLM(vocab=vocab,
                                            beam_width=40,
                                            alpha=1.,
                                            beta=1.,
                                            lm_path=lm_path,
                                            num_cpus=6,
                                            cutoff_prob=1, cutoff_top_n=40)
    # Fake two utterances of 1000 frames x 4334 classes of log-probabilities.
    log_prob = torch.randn((2,1000,4334), dtype=torch.float32)
    log_prob = torch.log_softmax(log_prob, dim=-1).numpy()
    # Only the first 100 / 200 frames of each utterance are valid.
    lengths = torch.IntTensor([100,200]).numpy()
    out = decoder.forward(log_probs=log_prob, log_probs_length=lengths)
    print()
| [
"torch.log_softmax",
"ctc_decoders.ctc_beam_search_decoder_batch",
"numpy.max",
"ctc_decoders.Scorer",
"torch.no_grad",
"numpy.zeros_like",
"torch.randn",
"torch.IntTensor"
] | [((859, 874), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (872, 874), False, 'import torch\n'), ((2354, 2403), 'torch.randn', 'torch.randn', (['(2, 1000, 4334)'], {'dtype': 'torch.float32'}), '((2, 1000, 4334), dtype=torch.float32)\n', (2365, 2403), False, 'import torch\n'), ((1119, 1329), 'ctc_decoders.ctc_beam_search_decoder_batch', 'ctc_beam_search_decoder_batch', (['probs_list', 'self.vocab'], {'beam_size': 'self.beam_width', 'num_processes': 'self.num_cpus', 'ext_scoring_func': 'self.scorer', 'cutoff_prob': 'self.cutoff_prob', 'cutoff_top_n': 'self.cutoff_top_n'}), '(probs_list, self.vocab, beam_size=self.\n beam_width, num_processes=self.num_cpus, ext_scoring_func=self.scorer,\n cutoff_prob=self.cutoff_prob, cutoff_top_n=self.cutoff_top_n)\n', (1148, 1329), False, 'from ctc_decoders import Scorer, ctc_beam_search_decoder_batch\n'), ((1604, 1625), 'numpy.zeros_like', 'np.zeros_like', (['logits'], {}), '(logits)\n', (1617, 1625), True, 'import numpy as np\n'), ((573, 630), 'ctc_decoders.Scorer', 'Scorer', (['alpha', 'beta'], {'model_path': 'lm_path', 'vocabulary': 'vocab'}), '(alpha, beta, model_path=lm_path, vocabulary=vocab)\n', (579, 630), False, 'from ctc_decoders import Scorer, ctc_beam_search_decoder_batch\n'), ((2417, 2452), 'torch.log_softmax', 'torch.log_softmax', (['log_prob'], {'dim': '(-1)'}), '(log_prob, dim=-1)\n', (2434, 2452), False, 'import torch\n'), ((2475, 2502), 'torch.IntTensor', 'torch.IntTensor', (['[100, 200]'], {}), '([100, 200])\n', (2490, 2502), False, 'import torch\n'), ((1726, 1738), 'numpy.max', 'np.max', (['item'], {}), '(item)\n', (1732, 1738), True, 'import numpy as np\n')] |
import pytest
import os
import toml
from tempfile import gettempdir
import neo.libs.login
from neo.libs import login
class TestLogin:
    """Tests for neo.libs.login using pyfakefs (`fs`) and pytest monkeypatch."""
    def test_check_env(self, fs):
        # check_env() should detect ~/.neo/config.toml on the fake filesystem.
        home = os.path.expanduser("~")
        fs.create_file(os.path.join(home, ".neo", "config.toml"))
        assert login.check_env()
    def fake_load_env_file(self):
        # Stand-in for login.load_env_file (no-op).
        pass
    def fake_check_env(self):
        # Stand-in for login.check_env: pretend the config file exists.
        return True
    def dummy_config_toml(self):
        """Build and parse a TOML config with one ACTIVE and one IDLE region."""
        config = ""
        config += "[auth]\n"
        config += "os_username = 'john'\n"
        config += "os_password = '<PASSWORD>'\n"
        config += "\n"
        config += "[region.wjv]\n"
        config += "os_auth_url = 'https://foo.id:443/v1'\n"
        config += "os_project_id = 'g7ia30trlk'\n"
        config += "os_user_domain_name = 'foo.id'\n"
        config += "status = 'ACTIVE'\n"
        config += "[region.jkt]\n"
        config += "os_auth_url = 'https://bar.id:443/v1'\n"
        config += "os_project_id = 'iqn1a69tolj'\n"
        config += "os_user_domain_name = 'bar.id'\n"
        config += "status = 'IDLE'\n"
        config += "\n"
        return toml.loads(config)
    def test_get_env_values(self, monkeypatch):
        # With config loading stubbed out, get_env_values() should succeed.
        monkeypatch.setattr(neo.libs.login, "load_env_file", self.dummy_config_toml)
        monkeypatch.setattr(neo.libs.login, "check_env", self.fake_check_env)
        assert login.get_env_values()
    def fake_get_env_values(self):
        # Canned environment list mirroring dummy_config_toml's two regions.
        env = [
            {
                "username": "john",
                "password": "<PASSWORD>",
                "region": "zone-1",
                "auth_url": "https://foo.id:443/v1",
                "project_id": "g7ia30trlk",
                "user_domain_name": "foo.id",
                "status": "ACTIVE",
            },
            {
                "username": "john",
                "password": "<PASSWORD>",
                "region": "zone-2",
                "auth_url": "https://bar.id:443/v1",
                "project_id": "iqn1a69tolj",
                "user_domain_name": "bar.id",
                "status": "IDLE",
            },
        ]
        return env
    def test_is_current_env(self, monkeypatch):
        # Matching url/domain/user combination should be recognised.
        monkeypatch.setattr(neo.libs.login, "get_env_values", self.fake_get_env_values)
        assert login.is_current_env("https://foo.id:443/v1", "foo.id", "john")
    def test_is_current_env_false(self, monkeypatch):
        # A username that matches no entry should yield None.
        monkeypatch.setattr(neo.libs.login, "get_env_values", self.fake_get_env_values)
        assert login.is_current_env("https://bar.id:443/v1", "bar.id", "merry") is None
    def fake_check_session(self):
        # Stand-in for login.check_session: pretend a session exists.
        return True
    def test_do_logout(self, monkeypatch, fs):
        # do_logout() should remove both the session pickle and the config file.
        monkeypatch.setattr(neo.libs.login, "check_session", self.fake_check_session)
        home = os.path.expanduser("~")
        tmp_dir = os.path.join(gettempdir(), ".neo")
        fs.create_file(tmp_dir + "/session.pkl")
        fs.create_file(os.path.join(home, ".neo", "config.toml"))
        assert os.path.exists(tmp_dir + "/session.pkl")
        assert os.path.exists(os.path.join(home, ".neo", "config.toml"))
        login.do_logout()
        assert os.path.exists(tmp_dir + "/session.pkl") is False
        assert os.path.exists(os.path.join(home, ".neo", "config.toml")) is False
    def test_check_session(self, fs):
        # check_session() should find the session pickle on the fake filesystem.
        tmp_dir = os.path.join(gettempdir(), ".neo")
        fs.create_file(tmp_dir + "/session.pkl")
        assert login.check_session()
| [
"os.path.exists",
"neo.libs.login.check_session",
"neo.libs.login.check_env",
"os.path.join",
"toml.loads",
"tempfile.gettempdir",
"neo.libs.login.get_env_values",
"neo.libs.login.do_logout",
"neo.libs.login.is_current_env",
"os.path.expanduser"
] | [((186, 209), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (204, 209), False, 'import os\n'), ((291, 308), 'neo.libs.login.check_env', 'login.check_env', ([], {}), '()\n', (306, 308), False, 'from neo.libs import login\n'), ((1121, 1139), 'toml.loads', 'toml.loads', (['config'], {}), '(config)\n', (1131, 1139), False, 'import toml\n'), ((1368, 1390), 'neo.libs.login.get_env_values', 'login.get_env_values', ([], {}), '()\n', (1388, 1390), False, 'from neo.libs import login\n'), ((2268, 2331), 'neo.libs.login.is_current_env', 'login.is_current_env', (['"""https://foo.id:443/v1"""', '"""foo.id"""', '"""john"""'], {}), "('https://foo.id:443/v1', 'foo.id', 'john')\n", (2288, 2331), False, 'from neo.libs import login\n'), ((2768, 2791), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2786, 2791), False, 'import os\n'), ((2977, 3017), 'os.path.exists', 'os.path.exists', (["(tmp_dir + '/session.pkl')"], {}), "(tmp_dir + '/session.pkl')\n", (2991, 3017), False, 'import os\n'), ((3100, 3117), 'neo.libs.login.do_logout', 'login.do_logout', ([], {}), '()\n', (3115, 3117), False, 'from neo.libs import login\n'), ((3422, 3443), 'neo.libs.login.check_session', 'login.check_session', ([], {}), '()\n', (3441, 3443), False, 'from neo.libs import login\n'), ((233, 274), 'os.path.join', 'os.path.join', (['home', '""".neo"""', '"""config.toml"""'], {}), "(home, '.neo', 'config.toml')\n", (245, 274), False, 'import os\n'), ((2490, 2554), 'neo.libs.login.is_current_env', 'login.is_current_env', (['"""https://bar.id:443/v1"""', '"""bar.id"""', '"""merry"""'], {}), "('https://bar.id:443/v1', 'bar.id', 'merry')\n", (2510, 2554), False, 'from neo.libs import login\n'), ((2823, 2835), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (2833, 2835), False, 'from tempfile import gettempdir\n'), ((2918, 2959), 'os.path.join', 'os.path.join', (['home', '""".neo"""', '"""config.toml"""'], {}), "(home, '.neo', 'config.toml')\n", (2930, 
2959), False, 'import os\n'), ((3048, 3089), 'os.path.join', 'os.path.join', (['home', '""".neo"""', '"""config.toml"""'], {}), "(home, '.neo', 'config.toml')\n", (3060, 3089), False, 'import os\n'), ((3134, 3174), 'os.path.exists', 'os.path.exists', (["(tmp_dir + '/session.pkl')"], {}), "(tmp_dir + '/session.pkl')\n", (3148, 3174), False, 'import os\n'), ((3336, 3348), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (3346, 3348), False, 'from tempfile import gettempdir\n'), ((3214, 3255), 'os.path.join', 'os.path.join', (['home', '""".neo"""', '"""config.toml"""'], {}), "(home, '.neo', 'config.toml')\n", (3226, 3255), False, 'import os\n')] |
import os
from SSHLibrary import SSHLibrary
from constants import (
HPC_IP,
HPC_USERNAME,
HPC_KEY_PATH,
HPC_HOME_PATH,
)
# The Service broker will use the SSHCommunication class
# to communicate with the HPC environment
# This is just a dummy implementation
# TODO: Improve the code and implement appropriate error handling
class SSHCommunication:
    """Wrapper around robotframework SSHLibrary for talking to the HPC host.

    Connects with the public key configured in ``constants`` on construction
    and exposes blocking/non-blocking command execution plus SCP transfers.
    """
    def __init__(self):
        # print(f"SSH Communication Constructor:")
        self.__ssh = SSHLibrary()
        # Fail fast when the private key file is missing or not a file.
        if not os.path.exists(HPC_KEY_PATH) or not os.path.isfile(HPC_KEY_PATH):
            print(f"{HPC_KEY_PATH} file does not exist or is not a readable file!")
            exit(1)
        self.__connect_with_public_key(host=HPC_IP,
                                       username=HPC_USERNAME,
                                       keyfile=HPC_KEY_PATH)
        self.home_path = HPC_HOME_PATH
    # Example code to create a connection with a username and password
    def __connect_login(self, host, username, password):
        self.__connection_index = self.__ssh.open_connection(host=host)
        self.__login = self.__ssh.login(username=username,
                                        password=password)
    def __connect_with_public_key(self, host, username, keyfile):
        # Open the connection and authenticate with the given key file;
        # allow_agent=True also lets a running ssh-agent supply the key.
        self.__connection_index = self.__ssh.open_connection(host=host)
        self.__login = self.__ssh.login_with_public_key(username=username,
                                                        keyfile=keyfile,
                                                        allow_agent=True)
        # print(f"Login: {self.__login}")
    # TODO: Handle the output and return_code instead of just returning them
    # Execute blocking commands
    # Waiting for an output and return_code
    def execute_blocking(self, command="ls -la", return_stdout=True, return_stderr=True, return_rc=True):
        """Run *command* remotely and return (stdout, stderr, return_code)."""
        output, err, return_code = self.__ssh.execute_command(command=command,
                                                              return_stdout=return_stdout,
                                                              return_stderr=return_stderr,
                                                              return_rc=return_rc)
        return output, err, return_code
    # Execute non-blocking commands
    # Does not return anything as expected
    def execute_non_blocking(self, command="ls -la"):
        """Start *command* remotely without waiting for it to finish."""
        self.__ssh.start_command(command)
    # TODO: Improve the wrappers and set the defaults appropriately
    # The next few functions are wrapper functions with simplified parameters
    # We can abstract some parameters as constants to simplify the signature
    def put_file(self, source, destination):
        """Upload a single file via SCP, preserving timestamps."""
        mode = "0744"
        scp = "ON"
        scp_preserve_times = True
        self.__ssh.put_file(source=source,
                            destination=destination,
                            mode=mode,
                            scp=scp,
                            scp_preserve_times=scp_preserve_times)
    def put_directory(self, source, destination, recursive=True):
        """Upload a directory (recursively by default) via SCP."""
        mode = "0744"
        scp = "ON"
        scp_preserve_times = True
        self.__ssh.put_directory(source=source,
                                 destination=destination,
                                 mode=mode,
                                 recursive=recursive,
                                 scp=scp,
                                 scp_preserve_times=scp_preserve_times)
    def get_file(self, source, destination):
        """Download a single file via SCP, preserving timestamps."""
        scp = "ON"
        scp_preserve_times = True
        self.__ssh.get_file(source=source,
                            destination=destination,
                            scp=scp,
                            scp_preserve_times=scp_preserve_times)
    def get_directory(self, source, destination, recursive=True):
        """Download a directory (recursively by default) via SCP."""
        scp = "ON"
        scp_preserve_times = True
        self.__ssh.get_directory(source=source,
                                 destination=destination,
                                 recursive=recursive,
                                 scp=scp,
                                 scp_preserve_times=scp_preserve_times)
| [
"os.path.isfile",
"os.path.exists",
"SSHLibrary.SSHLibrary"
] | [((464, 476), 'SSHLibrary.SSHLibrary', 'SSHLibrary', ([], {}), '()\n', (474, 476), False, 'from SSHLibrary import SSHLibrary\n'), ((492, 520), 'os.path.exists', 'os.path.exists', (['HPC_KEY_PATH'], {}), '(HPC_KEY_PATH)\n', (506, 520), False, 'import os\n'), ((528, 556), 'os.path.isfile', 'os.path.isfile', (['HPC_KEY_PATH'], {}), '(HPC_KEY_PATH)\n', (542, 556), False, 'import os\n')] |
import random
from PIL import Image
def roll_dice(sides=6):
    """Return a uniformly random integer in [1, sides], i.e. one die roll.

    Defaults to a standard six-sided die, so existing callers keep the
    original behavior.
    """
    return random.randint(1, sides)
def potential_moves():
    """Return the sorted distinct products of two six-sided dice.

    These are the only board distances reachable when movement is the
    product of the two dice (18 values from 1 to 36).
    """
    return sorted({die_a * die_b for die_a in range(1, 7) for die_b in range(1, 7)})
if __name__ == "__main__":
    # ===========
    # Demo to check I can do random dice rolls
    dice_moves = roll_dice()
    dice_multipler = roll_dice()
    moves = dice_moves * dice_multipler
    print("You rolled [ {} | {} ], so your piece moves [ {} ] around the board.".format(dice_moves, dice_multipler, moves))
    # ============
    # Let's draw out the potential squares you can land on. Created a lookup table of coordinates, starting from
    # bottom-right of the monopoly board, and working clockwise, as you would go if you were playing it.
    # Generated the coords with an algorithm, and decided for speed to LUT instead.
    monopoly = [
        (352, 352),
        (16+(33*9), 352),
        (16+(33*8), 352),
        (16+(33*7), 352),
        (16+(33*6), 352),
        (16+(33*5), 352),
        (16+(33*4), 352),
        (16+(33*3), 352),
        (16+(33*2), 352),
        (16+(33*1), 352),
        (10, 352),
        (10, (48 + (33 * 8))),
        (10, (48 + (33 * 7))),
        (10, (48 + (33 * 6))),
        (10, (48 + (33 * 5))),
        (10, (48 + (33 * 4))),
        (10, (48 + (33 * 3))),
        (10, (48 + (33 * 2))),
        (10, (48 + (33 * 1))),
        (10, (48 + (33 * 0))),
        (10, 10),
        (16 + (33 * 1), 10),
        (16 + (33 * 2), 10),
        (16 + (33 * 3), 10),
        (16 + (33 * 4), 10),
        (16 + (33 * 5), 10),
        (16 + (33 * 6), 10),
        (16 + (33 * 7), 10),
        (16 + (33 * 8), 10),
        (16 + (33 * 9), 10),
        (352, 10),
        (352, 48 + (33 * 0)),
        (352, 48 + (33 * 1)),
        (352, 48 + (33 * 2)),
        (352, 48 + (33 * 3)),
        (352, 48 + (33 * 4)),
        (352, 48 + (33 * 5)),
        (352, 48 + (33 * 6)),
        (352, 48 + (33 * 7)),
        (352, 48 + (33 * 8)),
    ]
    # Loading the template image file to see where you can and can't move to.
    template = Image.open("templates/actual_board.jpg")
    # Load the tick and cross symbols
    # NOTE(review): `cross` is loaded but never used below — presumably meant
    # to mark unreachable squares; confirm or remove.
    tick = Image.open("templates/tick.png")
    cross = Image.open("templates/cross.png")
    # Anywhere that you can potentially move to on the first roll, mark as a tick.
    # NOTE(review): `monopoly[move+1]` looks like an off-by-one relative to GO
    # at monopoly[0] — verify whether monopoly[move] was intended.
    for move in potential_moves():
        #move = (move + 4) % 40
        #print(move)
        template.paste(tick, monopoly[move+1], tick)
    # Show the image
    template.show()
"PIL.Image.open",
"random.randint"
] | [((65, 85), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (79, 85), False, 'import random\n'), ((2186, 2226), 'PIL.Image.open', 'Image.open', (['"""templates/actual_board.jpg"""'], {}), "('templates/actual_board.jpg')\n", (2196, 2226), False, 'from PIL import Image\n'), ((2277, 2309), 'PIL.Image.open', 'Image.open', (['"""templates/tick.png"""'], {}), "('templates/tick.png')\n", (2287, 2309), False, 'from PIL import Image\n'), ((2322, 2355), 'PIL.Image.open', 'Image.open', (['"""templates/cross.png"""'], {}), "('templates/cross.png')\n", (2332, 2355), False, 'from PIL import Image\n')] |
# pylint: disable=no-name-in-module,import-error
"""
Copyright 2017-2018 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
from distutils.core import setup
from setuptools import find_packages
from setuptools.command.install import install
DESCRIPTION = "Icetea - test framework"
OWNER_NAMES = "<NAME>"
OWNER_EMAILS = "<EMAIL>"
VERSION = "2.0.1"
def read(fname):
"""
Utility function to cat in a file
:param fname: filename
:return: file content as a String
"""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
INSTALL_REQUIRES = [
"prettytable<1.0",
"requests",
"yattag>=1.0,<2.0",
"pyserial>2.5",
"jsonmerge>=1.4.0,<2.0",
"jsonschema<3.0.0",
"mbed-ls>=1.5.1,<2.0",
"semver>=2.0,<3.0",
"mbed-flasher>=0.10.1,<0.11",
"six>=1.0,<2.0",
"pydash>=4.0,<5.0",
"transitions<1.0"
]
TEST_REQUIRES = [
"coverage>=4.0,<5.0",
"mock>=2.0,<3.0",
"sphinx>=1.0,<2.0",
"lxml",
"pylint>=1.0,<2.0",
"astroid>=1.0,<2.0"
]
class VerifyVersionCommand(install):
"""
Custom command to verify that the git tag matches our version
"""
description = "verify that the git tag matches our version"
def run(self):
is_ci = os.getenv("CIRCLECI")
if is_ci:
tag = os.getenv("CIRCLE_TAG")
version = "v" + VERSION
if tag != version:
info = "Git tag: {0} does not match the"\
"version of this app: {1}".format(tag, version)
sys.exit(info)
# else: you are your own - please do not publish any releases without tag!
setup(name="icetea",
version=VERSION,
description=DESCRIPTION,
long_description=read("README.md"),
long_description_content_type='text/markdown',
author=OWNER_NAMES,
author_email=OWNER_EMAILS,
maintainer=OWNER_NAMES,
maintainer_email=OWNER_EMAILS,
url="https://github.com/ARMmbed/icetea.git",
packages=find_packages(include=["icetea_lib.*", "icetea_lib"]),
data_files=[("icetea_lib", ["icetea_lib/tc_schema.json", "icetea_lib/logging_schema.json"])],
include_package_data=True,
keywords="armbed mbed-os mbed-cli ci framework testing automation",
license="(R) ARM",
tests_require=TEST_REQUIRES,
test_suite="test",
entry_points={
"console_scripts": [
"icetea=icetea_lib:icetea_main"
]
},
install_requires=INSTALL_REQUIRES,
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
cmdclass={
"verify": VerifyVersionCommand,
}
)
| [
"os.path.dirname",
"setuptools.find_packages",
"os.getenv",
"sys.exit"
] | [((1756, 1777), 'os.getenv', 'os.getenv', (['"""CIRCLECI"""'], {}), "('CIRCLECI')\n", (1765, 1777), False, 'import os\n'), ((2512, 2565), 'setuptools.find_packages', 'find_packages', ([], {'include': "['icetea_lib.*', 'icetea_lib']"}), "(include=['icetea_lib.*', 'icetea_lib'])\n", (2525, 2565), False, 'from setuptools import find_packages\n'), ((1814, 1837), 'os.getenv', 'os.getenv', (['"""CIRCLE_TAG"""'], {}), "('CIRCLE_TAG')\n", (1823, 1837), False, 'import os\n'), ((2050, 2064), 'sys.exit', 'sys.exit', (['info'], {}), '(info)\n', (2058, 2064), False, 'import sys\n'), ((1028, 1053), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1043, 1053), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
tests.middlwares.test_session
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests session middleware
:copyright: (c) 2015 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
from anillo.middlewares.default_headers import wrap_default_headers
from anillo.http.request import Request
from anillo.http.responses import Response
@wrap_default_headers({"in-test": "in-test-value"}, {"out-test": "out-test-value"})
def session_app(request):
if request.headers['in-test'] == "in-test-other-value":
return Response(headers={"out-test": "out-test-other-value"})
else:
return Response()
def test_default_header_without_headers():
request = Request()
response = session_app(request)
assert "in-test" in request.headers
assert request.headers['in-test'] == "in-test-value"
assert "out-test" in response.headers
assert response.headers['out-test'] == "out-test-value"
def test_default_header_with_headers():
request = Request(headers={"in-test": "in-test-other-value"})
response = session_app(request)
assert "in-test" in request.headers
assert request.headers['in-test'] == "in-test-other-value"
assert "out-test" in response.headers
assert response.headers['out-test'] == "out-test-other-value"
| [
"anillo.middlewares.default_headers.wrap_default_headers",
"anillo.http.responses.Response",
"anillo.http.request.Request"
] | [((371, 457), 'anillo.middlewares.default_headers.wrap_default_headers', 'wrap_default_headers', (["{'in-test': 'in-test-value'}", "{'out-test': 'out-test-value'}"], {}), "({'in-test': 'in-test-value'}, {'out-test':\n 'out-test-value'})\n", (391, 457), False, 'from anillo.middlewares.default_headers import wrap_default_headers\n'), ((705, 714), 'anillo.http.request.Request', 'Request', ([], {}), '()\n', (712, 714), False, 'from anillo.http.request import Request\n'), ((1006, 1057), 'anillo.http.request.Request', 'Request', ([], {'headers': "{'in-test': 'in-test-other-value'}"}), "(headers={'in-test': 'in-test-other-value'})\n", (1013, 1057), False, 'from anillo.http.request import Request\n'), ((555, 609), 'anillo.http.responses.Response', 'Response', ([], {'headers': "{'out-test': 'out-test-other-value'}"}), "(headers={'out-test': 'out-test-other-value'})\n", (563, 609), False, 'from anillo.http.responses import Response\n'), ((635, 645), 'anillo.http.responses.Response', 'Response', ([], {}), '()\n', (643, 645), False, 'from anillo.http.responses import Response\n')] |
from .models import Signal
from rest_framework import viewsets
from dashboard.quickstart.serializers import SignalSerializer
from django.utils import timezone
class SignalViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows Signal to be viewed or edited.
"""
queryset = Signal.objects.filter(
time_recieved__startswith=timezone.now().date())
serializer_class = SignalSerializer
| [
"django.utils.timezone.now"
] | [((353, 367), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (365, 367), False, 'from django.utils import timezone\n')] |
import socket
import constants
import serial
import time
from datetime import date, datetime
import errno
import select
from socket import error as socket_error
print("======================================")
print("Setting up Serial between Raspberry Pi and Leonardo.")
ser = serial.Serial(constants.SERIAL_PORT, constants.SERIAL_BUADRATE)
print("Serial port information: ")
print(" Port: " + str(constants.SERIAL_PORT))
print(" Buadrate: " + str(constants.SERIAL_BUADRATE))
serverAddressPort = (constants.SERVER_UDP_IP, constants.SERVER_UDP_PORT)
# Create a UDP socket at client side
clientSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
clientSock.bind(('', 30020))
clientSock.setblocking(0)
print("======================================")
print("UDP client setup and ready to communicate")
def processData(dataRecvd):
#Split only first seperator, then route data
splitData = dataRecvd.split(',', 1)
if (splitData[0] == 'm'):
print("MOVEMENT")
# Movement data
# Send serial movement data to arduino
dataToSend = ('<'+splitData[1]+'>').encode('utf-8')
ser.write(dataToSend)
if (splitData[0] == 's'):
print("STATUS")
# Status
# Send data back to server with % and others
if (splitData[0] == 'g'):
print("GYRO")
# Gyro
# (Unsure if this will be connected to the raspberry pi or arduino)
while(True):
try:
# Send to server using created UDP socket
dataSend = "" # Keep Alive
print("[{}:{}:{}] S {} <- {}".format(datetime.now().hour, datetime.now().minute, datetime.now().second, serverAddressPort, dataSend))
clientSock.sendto(str.encode(dataSend), serverAddressPort)
# Recieve with a timeout (in this case 1s)
dataReady = select.select([clientSock], [], [], 0.25)
if dataReady[0]:
dataRecvdTupl = clientSock.recvfrom(constants.BUFFER_SIZE)
dataRecvd = dataRecvdTupl[0].decode("utf-8")
print("[{}:{}:{}] R {} -> {}".format(datetime.now().hour, datetime.now().minute, datetime.now().second, dataRecvdTupl[1], dataRecvd))
processData(dataRecvd)
except socket_error as e:
#Riase error if its not a connection error
if e.errno == errno.ECONNREFUSED:
print("[{}:{}:{}] Error: Connection refused".format(datetime.now().hour, datetime.now().minute, datetime.now().second))
elif e.errno == errno.WSAECONNRESET:
print("[{}:{}:{}] Error: Connection forcibly closed".format(datetime.now().hour, datetime.now().minute, datetime.now().second))
else:
raise e
time.sleep(1)
| [
"select.select",
"socket.socket",
"time.sleep",
"datetime.datetime.now",
"serial.Serial"
] | [((278, 341), 'serial.Serial', 'serial.Serial', (['constants.SERIAL_PORT', 'constants.SERIAL_BUADRATE'], {}), '(constants.SERIAL_PORT, constants.SERIAL_BUADRATE)\n', (291, 341), False, 'import serial\n'), ((607, 655), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (620, 655), False, 'import socket\n'), ((1803, 1844), 'select.select', 'select.select', (['[clientSock]', '[]', '[]', '(0.25)'], {}), '([clientSock], [], [], 0.25)\n', (1816, 1844), False, 'import select\n'), ((2662, 2675), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2672, 2675), False, 'import time\n'), ((1567, 1581), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1579, 1581), False, 'from datetime import date, datetime\n'), ((1588, 1602), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1600, 1602), False, 'from datetime import date, datetime\n'), ((1611, 1625), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1623, 1625), False, 'from datetime import date, datetime\n'), ((2047, 2061), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2059, 2061), False, 'from datetime import date, datetime\n'), ((2068, 2082), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2080, 2082), False, 'from datetime import date, datetime\n'), ((2091, 2105), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2103, 2105), False, 'from datetime import date, datetime\n'), ((2367, 2381), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2379, 2381), False, 'from datetime import date, datetime\n'), ((2388, 2402), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2400, 2402), False, 'from datetime import date, datetime\n'), ((2411, 2425), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2423, 2425), False, 'from datetime import date, datetime\n'), ((2552, 2566), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2564, 2566), False, 'from 
datetime import date, datetime\n'), ((2573, 2587), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2585, 2587), False, 'from datetime import date, datetime\n'), ((2596, 2610), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2608, 2610), False, 'from datetime import date, datetime\n')] |
from collections import namedtuple
# From pyenv.git/pylib/sr/vision.py
MARKER_ARENA, MARKER_ROBOT, MARKER_PEDESTAL, MARKER_TOKEN = range(4)
marker_offsets = {
MARKER_ARENA: 0,
MARKER_ROBOT: 28,
MARKER_PEDESTAL: 32,
MARKER_TOKEN: 41
}
marker_sizes = {
MARKER_ARENA: 0.25 * (10.0/12),
MARKER_ROBOT: 0.1 * (10.0/12),
MARKER_PEDESTAL: 0.2 * (10.0/12),
MARKER_TOKEN: 0.2 * (10.0/12)
}
# MarkerInfo class
MarkerInfo = namedtuple( "MarkerInfo", "code marker_type offset size" )
def create_marker_info_by_type(marker_type, offset):
return MarkerInfo(marker_type = marker_type,
offset = offset,
size = marker_sizes[marker_type],
code = marker_offsets[marker_type] + offset)
# Points
# TODO: World Coordinates
PolarCoord = namedtuple("PolarCoord", "length rot_y")
Point = namedtuple("Point", "polar")
# Marker class
MarkerBase = namedtuple( "Marker", "info res centre timestamp" )
class Marker(MarkerBase):
def __init__(self, *a, **kwd):
# Aliases
self.dist = self.centre.polar.length
self.rot_y = self.centre.polar.rot_y
| [
"collections.namedtuple"
] | [((448, 504), 'collections.namedtuple', 'namedtuple', (['"""MarkerInfo"""', '"""code marker_type offset size"""'], {}), "('MarkerInfo', 'code marker_type offset size')\n", (458, 504), False, 'from collections import namedtuple\n'), ((821, 861), 'collections.namedtuple', 'namedtuple', (['"""PolarCoord"""', '"""length rot_y"""'], {}), "('PolarCoord', 'length rot_y')\n", (831, 861), False, 'from collections import namedtuple\n'), ((870, 898), 'collections.namedtuple', 'namedtuple', (['"""Point"""', '"""polar"""'], {}), "('Point', 'polar')\n", (880, 898), False, 'from collections import namedtuple\n'), ((928, 977), 'collections.namedtuple', 'namedtuple', (['"""Marker"""', '"""info res centre timestamp"""'], {}), "('Marker', 'info res centre timestamp')\n", (938, 977), False, 'from collections import namedtuple\n')] |
"""
Utility functions
"""
from datetime import datetime, timezone
def parse_date_string(date: str) -> datetime:
"""Converts date as string (e.g. "2004-05-25T02:19:28Z") to UNIX timestamp (uses UTC, always)
"""
# https://docs.python.org/3.6/library/datetime.html#strftime-strptime-behavior
# http://strftime.org/
parsed = datetime.strptime(date, '%Y-%m-%dT%H:%M:%SZ') # string parse time
# now apply UTC timezone
return parsed.replace(tzinfo=timezone.utc)
| [
"datetime.datetime.strptime"
] | [((343, 388), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%Y-%m-%dT%H:%M:%SZ"""'], {}), "(date, '%Y-%m-%dT%H:%M:%SZ')\n", (360, 388), False, 'from datetime import datetime, timezone\n')] |
def obfuscate(utils_path, project_path):
import subprocess
print('Running obfuscator ...')
subprocess.run(f'{utils_path}/confuser/Confuser.CLI.exe {project_path} -n')
| [
"subprocess.run"
] | [((104, 179), 'subprocess.run', 'subprocess.run', (['f"""{utils_path}/confuser/Confuser.CLI.exe {project_path} -n"""'], {}), "(f'{utils_path}/confuser/Confuser.CLI.exe {project_path} -n')\n", (118, 179), False, 'import subprocess\n')] |
# -*- coding: utf-8 -*-
#@+leo-ver=5-thin
#@+node:ekr.20201129023817.1: * @file leoTest2.py
#@@first
"""
Support for Leo's new unit tests, contained in leo/unittests/test_*.py.
Run these tests using unittest or pytest from the command line.
See g.run_unit_tests and g.run_coverage_tests.
This file also contains classes that convert @test nodes in unitTest.leo to
tests in leo/unittest. Eventually these classes will move to scripts.leo.
"""
import time
import unittest
from leo.core import leoGlobals as g
from leo.core import leoApp
#@+others
#@+node:ekr.20201130195111.1: ** function.create_app
def create_app(gui_name='null'):
"""
Create the Leo application, g.app, the Gui, g.app.gui, and a commander.
This method is expensive (0.5 sec) only the first time it is called.
Thereafter, recreating g.app, g.app.gui, and new commands is fast.
"""
trace = False
t1 = time.process_time()
#
# Set g.unitTesting *early*, for guards, to suppress the splash screen, etc.
g.unitTesting = True
# Create g.app now, to avoid circular dependencies.
g.app = leoApp.LeoApp()
# Late imports.
from leo.core import leoConfig
from leo.core import leoNodes
from leo.core import leoCommands
from leo.core.leoGui import NullGui
if gui_name == 'qt':
from leo.plugins.qt_gui import LeoQtGui
t2 = time.process_time()
g.app.recentFilesManager = leoApp.RecentFilesManager()
g.app.loadManager = lm = leoApp.LoadManager()
lm.computeStandardDirectories()
if not g.app.setLeoID(useDialog=False, verbose=True):
raise ValueError("unable to set LeoID.")
g.app.nodeIndices = leoNodes.NodeIndices(g.app.leoID)
g.app.config = leoConfig.GlobalConfigManager()
g.app.db = g.NullObject('g.app.db')
g.app.pluginsController = g.NullObject('g.app.pluginsController')
g.app.commander_cacher = g.NullObject('g.app.commander_cacher')
if gui_name == 'null':
g.app.gui = NullGui()
elif gui_name == 'qt':
g.app.gui = LeoQtGui()
else:
raise TypeError(f"create_gui: unknown gui_name: {gui_name!r}")
t3 = time.process_time()
# Create a dummy commander, to do the imports in c.initObjects.
# Always use a null gui to avoid screen flash.
# setUp will create another commander.
c = leoCommands.Commands(fileName=None, gui=g.app.gui)
# Create minimal config dictionaries.
settings_d, bindings_d = lm.createDefaultSettingsDicts()
lm.globalSettingsDict = settings_d
lm.globalBindingsDict = bindings_d
c.config.settingsDict = settings_d
c.config.bindingsDict = bindings_d
assert g.unitTesting is True # Defensive.
t4 = time.process_time()
# Trace times. This trace happens only once:
# imports: 0.016
# gui: 0.000
# commander: 0.469
# total: 0.484
if trace and t4 - t3 > 0.1:
print('create_app:\n'
f" imports: {(t2-t1):.3f}\n"
f" gui: {(t3-t2):.3f}\n"
f"commander: {(t4-t2):.3f}\n"
f" total: {(t4-t1):.3f}\n")
return c
#@+node:ekr.20210902014907.1: ** class LeoUnitTest(unittest.TestCase)
class LeoUnitTest(unittest.TestCase):
"""
The base class for all unit tests in Leo.
Contains setUp/tearDown methods and various utilites.
"""
#@+others
#@+node:ekr.20210901140855.2: *3* LeoUnitTest.setUp, tearDown & setUpClass
@classmethod
def setUpClass(cls):
create_app(gui_name='null')
def setUp(self):
"""
Create a commander using a **null** gui, regardless of g.app.gui.
Create the nodes in the commander.
"""
# Do the import here to avoid circular dependencies.
from leo.core import leoCommands
from leo.core.leoGui import NullGui
# Set g.unitTesting *early*, for guards.
g.unitTesting = True
# Create a new commander for each test.
# This is fast, because setUpClass has done all the imports.
self.c = c = leoCommands.Commands(fileName=None, gui=NullGui())
# Init the 'root' and '@settings' nodes.
self.root_p = c.rootPosition()
self.root_p.h = 'root'
self.settings_p = self.root_p.insertAfter()
self.settings_p.h = '@settings'
# Select the 'root' node.
c.selectPosition(self.root_p)
def tearDown(self):
self.c = None
#@+node:ekr.20210830151601.1: *3* LeoUnitTest.create_test_outline
def create_test_outline(self):
p = self.c.p
# Create the following outline:
#
# root
# child clone a
# node clone 1
# child b
# child clone a
# node clone 1
# child c
# node clone 1
# child clone a
# node clone 1
# child b
# child clone a
# node clone 1
assert p == self.root_p
assert p.h == 'root'
# Child a
child_clone_a = p.insertAsLastChild()
child_clone_a.h = 'child clone a'
node_clone_1 = child_clone_a.insertAsLastChild()
node_clone_1.h = 'node clone 1'
# Child b
child_b = p.insertAsLastChild()
child_b.h = 'child b'
# Clone 'child clone a'
clone = child_clone_a.clone()
clone.moveToLastChildOf(child_b)
# Child c
child_c = p.insertAsLastChild()
child_c.h = 'child c'
# Clone 'node clone 1'
clone = node_clone_1.clone()
clone.moveToLastChildOf(child_c)
# Clone 'child clone a'
clone = child_clone_a.clone()
clone.moveToLastChildOf(p)
# Clone 'child b'
clone = child_b.clone()
clone.moveToLastChildOf(p)
#@+node:ekr.20210831101111.1: *3* LeoUnitTest.dump_tree
def dump_tree(self, tag=''):
c = self.c
print('')
g.trace(tag)
for p in c.all_positions():
print(f"clone? {int(p.isCloned())} {' '*p.level()} {p.h}")
#@-others
#@-others
#@-leo
| [
"leo.core.leoNodes.NodeIndices",
"leo.core.leoApp.LoadManager",
"leo.core.leoApp.LeoApp",
"leo.core.leoApp.RecentFilesManager",
"leo.core.leoCommands.Commands",
"leo.core.leoGui.NullGui",
"leo.core.leoGlobals.trace",
"leo.plugins.qt_gui.LeoQtGui",
"leo.core.leoConfig.GlobalConfigManager",
"leo.cor... | [((899, 918), 'time.process_time', 'time.process_time', ([], {}), '()\n', (916, 918), False, 'import time\n'), ((1099, 1114), 'leo.core.leoApp.LeoApp', 'leoApp.LeoApp', ([], {}), '()\n', (1112, 1114), False, 'from leo.core import leoApp\n'), ((1363, 1382), 'time.process_time', 'time.process_time', ([], {}), '()\n', (1380, 1382), False, 'import time\n'), ((1414, 1441), 'leo.core.leoApp.RecentFilesManager', 'leoApp.RecentFilesManager', ([], {}), '()\n', (1439, 1441), False, 'from leo.core import leoApp\n'), ((1471, 1491), 'leo.core.leoApp.LoadManager', 'leoApp.LoadManager', ([], {}), '()\n', (1489, 1491), False, 'from leo.core import leoApp\n'), ((1659, 1692), 'leo.core.leoNodes.NodeIndices', 'leoNodes.NodeIndices', (['g.app.leoID'], {}), '(g.app.leoID)\n', (1679, 1692), False, 'from leo.core import leoNodes\n'), ((1712, 1743), 'leo.core.leoConfig.GlobalConfigManager', 'leoConfig.GlobalConfigManager', ([], {}), '()\n', (1741, 1743), False, 'from leo.core import leoConfig\n'), ((1759, 1783), 'leo.core.leoGlobals.NullObject', 'g.NullObject', (['"""g.app.db"""'], {}), "('g.app.db')\n", (1771, 1783), True, 'from leo.core import leoGlobals as g\n'), ((1814, 1853), 'leo.core.leoGlobals.NullObject', 'g.NullObject', (['"""g.app.pluginsController"""'], {}), "('g.app.pluginsController')\n", (1826, 1853), True, 'from leo.core import leoGlobals as g\n'), ((1883, 1921), 'leo.core.leoGlobals.NullObject', 'g.NullObject', (['"""g.app.commander_cacher"""'], {}), "('g.app.commander_cacher')\n", (1895, 1921), True, 'from leo.core import leoGlobals as g\n'), ((2127, 2146), 'time.process_time', 'time.process_time', ([], {}), '()\n', (2144, 2146), False, 'import time\n'), ((2317, 2367), 'leo.core.leoCommands.Commands', 'leoCommands.Commands', ([], {'fileName': 'None', 'gui': 'g.app.gui'}), '(fileName=None, gui=g.app.gui)\n', (2337, 2367), False, 'from leo.core import leoCommands\n'), ((2683, 2702), 'time.process_time', 'time.process_time', ([], {}), '()\n', (2700, 2702), 
False, 'import time\n'), ((1539, 1584), 'leo.core.leoGlobals.app.setLeoID', 'g.app.setLeoID', ([], {'useDialog': '(False)', 'verbose': '(True)'}), '(useDialog=False, verbose=True)\n', (1553, 1584), True, 'from leo.core import leoGlobals as g\n'), ((1969, 1978), 'leo.core.leoGui.NullGui', 'NullGui', ([], {}), '()\n', (1976, 1978), False, 'from leo.core.leoGui import NullGui\n'), ((5896, 5908), 'leo.core.leoGlobals.trace', 'g.trace', (['tag'], {}), '(tag)\n', (5903, 5908), True, 'from leo.core import leoGlobals as g\n'), ((2026, 2036), 'leo.plugins.qt_gui.LeoQtGui', 'LeoQtGui', ([], {}), '()\n', (2034, 2036), False, 'from leo.plugins.qt_gui import LeoQtGui\n'), ((4061, 4070), 'leo.core.leoGui.NullGui', 'NullGui', ([], {}), '()\n', (4068, 4070), False, 'from leo.core.leoGui import NullGui\n')] |
from setuptools import setup, find_packages
exec(open('counsyl_pyads/version.py').read())
setup(
name='counsyl-pyads',
version=__version__,
packages=find_packages(),
scripts=['bin/twincat_plc_info.py'],
include_package_data=True,
zip_safe=False,
author='<NAME>.',
author_email='<EMAIL>',
description='A library for directly interacting with a Twincat PLC.',
url='https://github.com/counsyl/counsyl-pyads/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| [
"setuptools.find_packages"
] | [((163, 178), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (176, 178), False, 'from setuptools import setup, find_packages\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from pykg2vec.core.KGMeta import ModelMeta, InferenceMeta
class RotatE(ModelMeta, InferenceMeta):
""" `Rotate-Knowledge graph embedding by relation rotation in complex space`_
RotatE models the entities and the relations in the complex vector space.
The translational relation in RotatE is defined as the element-wise 2D
rotation in which the head entity h will be rotated to the tail entity t by
multiplying the unit-length relation r in complex number form.
Args:
config (object): Model configuration parameters.
Attributes:
config (object): Model configuration.
data_stats (object): ModelMeta object instance. It consists of the knowledge graph metadata.
model_name (str): Name of the model.
Examples:
>>> from pykg2vec.core.RotatE import RotatE
>>> from pykg2vec.utils.trainer import Trainer
>>> model = RotatE()
>>> trainer = Trainer(model=model, debug=False)
>>> trainer.build_model()
>>> trainer.train_model()
.. _Rotate-Knowledge graph embedding by relation rotation in complex space:
https://openreview.net/pdf?id=HkgEQnRqYQ
"""
def __init__(self, config=None):
self.config = config
self.data_stats = self.config.kg_meta
self.model_name = 'RotatE'
def def_inputs(self):
"""Defines the inputs to the model.
Attributes:
pos_h (Tensor): Positive Head entities ids.
pos_r (Tensor): Positive Relation ids of the triple.
pos_t (Tensor): Positive Tail entity ids of the triple.
neg_h (Tensor): Negative Head entities ids.
neg_r (Tensor): Negative Relation ids of the triple.
neg_t (Tensor): Negative Tail entity ids of the triple.
test_h_batch (Tensor): Batch of head ids for testing.
test_r_batch (Tensor): Batch of relation ids for testing
test_t_batch (Tensor): Batch of tail ids for testing.
"""
self.pos_h = tf.placeholder(tf.int32, [None])
self.pos_t = tf.placeholder(tf.int32, [None])
self.pos_r = tf.placeholder(tf.int32, [None])
self.neg_h = tf.placeholder(tf.int32, [None])
self.neg_t = tf.placeholder(tf.int32, [None])
self.neg_r = tf.placeholder(tf.int32, [None])
self.test_h_batch = tf.placeholder(tf.int32, [None])
self.test_r_batch = tf.placeholder(tf.int32, [None])
self.test_t_batch = tf.placeholder(tf.int32, [None])
def def_parameters(self):
"""Defines the model parameters.
Attributes:
k (Tensor): Size of the latent dimesnion for entities and relations.
ent_embeddings_real (Tensor Variable): Lookup variable containing real values of the entities.
ent_embeddings_imag (Tensor Variable): Lookup variable containing imaginary values of the entities.
rel_embeddings_real (Tensor Variable): Lookup variable containing real values of the relations.
parameter_list (list): List of Tensor parameters.
"""
num_total_ent = self.data_stats.tot_entity
num_total_rel = self.data_stats.tot_relation
k = self.config.hidden_size
with tf.name_scope("embedding"):
self.ent_embeddings = tf.get_variable(name="ent_embeddings_real", shape=[num_total_ent, k],
initializer=tf.contrib.layers.xavier_initializer(uniform=False))
self.ent_embeddings_imag = tf.get_variable(name="ent_embeddings_imag", shape=[num_total_ent, k],
initializer=tf.contrib.layers.xavier_initializer(uniform=False))
self.rel_embeddings = tf.get_variable(name="rel_embeddings_real", shape=[num_total_rel, k],
initializer=tf.contrib.layers.xavier_initializer(uniform=False))
self.parameter_list = [self.ent_embeddings, self.ent_embeddings_imag, self.rel_embeddings]
def comp_mul_and_min(self, hr, hi, rr, ri, tr, ti):
"""Calculates training score for loss function.
Args:
hi(Tensor): Imaginary part of the head embedding.
hr(Tensor): Real part of the head embedding.
ri(Tensor): Imaginary part of the tail embedding.
rr(Tensor): Real part of the tail embedding.
ti(Tensor): Imaginary part of the relation embedding.
tr(Tensor): Real part of the relation embedding.
Returns:
Tensors: Returns a tensor
"""
score_r = hr * rr - hi * ri - tr
score_i = hr * ri + hi * rr - ti
return tf.reduce_sum(tf.sqrt(score_r ** 2 + score_i ** 2), -1)
def comp_mul_and_min_4_test(self, hr, hi, rr, ri, tr, ti):
"""Calculates test score for loss function.
Args:
hi(Tensor): Imaginary part of the head embedding.
hr(Tensor): Real part of the head embedding.
ri(Tensor): Imaginary part of the tail embedding.
rr(Tensor): Real part of the tail embedding.
ti(Tensor): Imaginary part of the relation embedding.
tr(Tensor): Real part of the relation embedding.
Returns:
Tensors: Returns a tensor
"""
rr = tf.expand_dims(rr, axis=1)
ri = tf.expand_dims(ri, axis=1)
score_r = tf.cond(tf.shape(hr)[0] < tf.shape(tr)[0],
lambda: tf.expand_dims(hr, axis=1)*rr-tf.expand_dims(hi, axis=1)*ri-tr,
lambda: hr*rr-hi*ri-tf.expand_dims(tr, axis=1))
score_i = tf.cond(tf.shape(hr)[0] < tf.shape(tr)[0],
lambda: tf.expand_dims(hr, axis=1) * ri + tf.expand_dims(hi, axis=1) * rr - ti,
lambda: hr * ri + hi * rr - tf.expand_dims(ti, axis=1))
return tf.reduce_sum(tf.sqrt(score_r ** 2 + score_i ** 2), -1)
def def_loss(self):
"""Defines the layers of the algorithm."""
(pos_h_e_r, pos_h_e_i), (pos_r_e_r, pos_r_e_i), (pos_t_e_r, pos_t_e_i) \
= self.embed(self.pos_h, self.pos_r, self.pos_t)
(neg_h_e_r, neg_h_e_i), (neg_r_e_r, neg_r_e_i), (neg_t_e_r, neg_t_e_i) \
= self.embed(self.neg_h, self.neg_r, self.neg_t)
pos_score = self.comp_mul_and_min(pos_h_e_r, pos_h_e_i, pos_r_e_r, pos_r_e_i, pos_t_e_r, pos_t_e_i)
neg_score = self.comp_mul_and_min(neg_h_e_r, neg_h_e_i, neg_r_e_r, neg_r_e_i, neg_t_e_r, neg_t_e_i)
self.loss = tf.reduce_sum(tf.maximum(pos_score + self.config.margin - neg_score, 0))
def test_batch(self):
"""Function that performs batch testing for the algorithm.
Returns:
Tensors: Returns ranks of head and tail.
"""
num_entity = self.data_stats.tot_entity
(h_vec_r, h_vec_i), (r_vec_r, r_vec_i), (t_vec_r, t_vec_i) \
= self.embed(self.test_h_batch, self.test_r_batch, self.test_t_batch)
head_pos_score = self.comp_mul_and_min_4_test(self.ent_embeddings, self.ent_embeddings_imag,
r_vec_r, r_vec_i, t_vec_r, t_vec_i)
tail_pos_score = self.comp_mul_and_min_4_test(h_vec_r, h_vec_i, r_vec_r, r_vec_i,
self.ent_embeddings, self.ent_embeddings_imag)
_, head_rank = tf.nn.top_k(head_pos_score, k=num_entity)
_, tail_rank = tf.nn.top_k(tail_pos_score, k=num_entity)
return head_rank, tail_rank
# Override
def dissimilarity(self, h, r, t):
"""Function to calculate dissimilarity measure in embedding space.
Args:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
Returns:
Tensors: Returns the dissimilarity measure.
"""
if self.config.L1_flag:
return tf.reduce_sum(tf.abs(h + r - t), axis=1) # L1 norm
else:
return tf.reduce_sum((h + r - t) ** 2, axis=1) # L2 norm
def embed(self, h, r, t):
"""Function to get the embedding value.
Args:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
Returns:
Tensors: Returns real and imaginary values of head, relation and tail embedding.
"""
pi = 3.14159265358979323846
h_e_r = tf.nn.embedding_lookup(self.ent_embeddings, h)
h_e_i = tf.nn.embedding_lookup(self.ent_embeddings_imag, h)
r_e_r = tf.nn.embedding_lookup(self.rel_embeddings, r)
t_e_r = tf.nn.embedding_lookup(self.ent_embeddings, t)
t_e_i = tf.nn.embedding_lookup(self.ent_embeddings_imag, t)
r_e_r = r_e_r / pi
r_e_i = tf.sin(r_e_r)
r_e_r = tf.cos(r_e_r)
return (h_e_r, h_e_i), (r_e_r, r_e_i), (t_e_r, t_e_i)
    def get_embed(self, h, r, t, sess=None):
        """Function to get the embedding value in numpy.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.
            sess: TensorFlow session used to evaluate the embedding tensors.
        Returns:
            Tensors: Returns real and imaginary values of head, relation and tail embedding.
        """
        emb_h, emb_r, emb_t = self.embed(h, r, t)
        # NOTE(review): sess defaults to None but is used unconditionally
        # below — passing no session raises AttributeError. Callers must
        # always supply a live session.
        h, r, t = sess.run([emb_h, emb_r, emb_t])
        return h, r, t
    def get_proj_embed(self, h, r, t, sess):
        """Function to get the projected embedding value in numpy.

        This model applies no extra projection, so the call simply
        delegates to get_embed.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.
            sess: TensorFlow session used to evaluate the embedding tensors.
        """
        return self.get_embed(h, r, t, sess)
| [
"tensorflow.nn.embedding_lookup",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.placeholder",
"tensorflow.nn.top_k",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.sqrt",
"tensorflow.name_scope",
"tensorflow.maximum",
"tensorflow.expand_dims",
"tensorflow.sin",
"tensorf... | [((2303, 2335), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (2317, 2335), True, 'import tensorflow as tf\n'), ((2357, 2389), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (2371, 2389), True, 'import tensorflow as tf\n'), ((2411, 2443), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (2425, 2443), True, 'import tensorflow as tf\n'), ((2465, 2497), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (2479, 2497), True, 'import tensorflow as tf\n'), ((2519, 2551), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (2533, 2551), True, 'import tensorflow as tf\n'), ((2573, 2605), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (2587, 2605), True, 'import tensorflow as tf\n'), ((2635, 2667), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (2649, 2667), True, 'import tensorflow as tf\n'), ((2696, 2728), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (2710, 2728), True, 'import tensorflow as tf\n'), ((2757, 2789), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (2771, 2789), True, 'import tensorflow as tf\n'), ((5698, 5724), 'tensorflow.expand_dims', 'tf.expand_dims', (['rr'], {'axis': '(1)'}), '(rr, axis=1)\n', (5712, 5724), True, 'import tensorflow as tf\n'), ((5738, 5764), 'tensorflow.expand_dims', 'tf.expand_dims', (['ri'], {'axis': '(1)'}), '(ri, axis=1)\n', (5752, 5764), True, 'import tensorflow as tf\n'), ((7787, 7828), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['head_pos_score'], {'k': 'num_entity'}), '(head_pos_score, k=num_entity)\n', (7798, 7828), True, 'import tensorflow as tf\n'), ((7852, 7893), 
'tensorflow.nn.top_k', 'tf.nn.top_k', (['tail_pos_score'], {'k': 'num_entity'}), '(tail_pos_score, k=num_entity)\n', (7863, 7893), True, 'import tensorflow as tf\n'), ((8937, 8983), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.ent_embeddings', 'h'], {}), '(self.ent_embeddings, h)\n', (8959, 8983), True, 'import tensorflow as tf\n'), ((9000, 9051), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.ent_embeddings_imag', 'h'], {}), '(self.ent_embeddings_imag, h)\n', (9022, 9051), True, 'import tensorflow as tf\n'), ((9068, 9114), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.rel_embeddings', 'r'], {}), '(self.rel_embeddings, r)\n', (9090, 9114), True, 'import tensorflow as tf\n'), ((9131, 9177), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.ent_embeddings', 't'], {}), '(self.ent_embeddings, t)\n', (9153, 9177), True, 'import tensorflow as tf\n'), ((9194, 9245), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.ent_embeddings_imag', 't'], {}), '(self.ent_embeddings_imag, t)\n', (9216, 9245), True, 'import tensorflow as tf\n'), ((9289, 9302), 'tensorflow.sin', 'tf.sin', (['r_e_r'], {}), '(r_e_r)\n', (9295, 9302), True, 'import tensorflow as tf\n'), ((9319, 9332), 'tensorflow.cos', 'tf.cos', (['r_e_r'], {}), '(r_e_r)\n', (9325, 9332), True, 'import tensorflow as tf\n'), ((3539, 3565), 'tensorflow.name_scope', 'tf.name_scope', (['"""embedding"""'], {}), "('embedding')\n", (3552, 3565), True, 'import tensorflow as tf\n'), ((5042, 5078), 'tensorflow.sqrt', 'tf.sqrt', (['(score_r ** 2 + score_i ** 2)'], {}), '(score_r ** 2 + score_i ** 2)\n', (5049, 5078), True, 'import tensorflow as tf\n'), ((6280, 6316), 'tensorflow.sqrt', 'tf.sqrt', (['(score_r ** 2 + score_i ** 2)'], {}), '(score_r ** 2 + score_i ** 2)\n', (6287, 6316), True, 'import tensorflow as tf\n'), ((6935, 6992), 'tensorflow.maximum', 'tf.maximum', (['(pos_score + self.config.margin - neg_score)', '(0)'], {}), 
'(pos_score + self.config.margin - neg_score, 0)\n', (6945, 6992), True, 'import tensorflow as tf\n'), ((8447, 8486), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['((h + r - t) ** 2)'], {'axis': '(1)'}), '((h + r - t) ** 2, axis=1)\n', (8460, 8486), True, 'import tensorflow as tf\n'), ((8376, 8393), 'tensorflow.abs', 'tf.abs', (['(h + r - t)'], {}), '(h + r - t)\n', (8382, 8393), True, 'import tensorflow as tf\n'), ((3733, 3784), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'uniform': '(False)'}), '(uniform=False)\n', (3769, 3784), True, 'import tensorflow as tf\n'), ((3962, 4013), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'uniform': '(False)'}), '(uniform=False)\n', (3998, 4013), True, 'import tensorflow as tf\n'), ((4181, 4232), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'uniform': '(False)'}), '(uniform=False)\n', (4217, 4232), True, 'import tensorflow as tf\n'), ((5792, 5804), 'tensorflow.shape', 'tf.shape', (['hr'], {}), '(hr)\n', (5800, 5804), True, 'import tensorflow as tf\n'), ((5810, 5822), 'tensorflow.shape', 'tf.shape', (['tr'], {}), '(tr)\n', (5818, 5822), True, 'import tensorflow as tf\n'), ((5971, 5997), 'tensorflow.expand_dims', 'tf.expand_dims', (['tr'], {'axis': '(1)'}), '(tr, axis=1)\n', (5985, 5997), True, 'import tensorflow as tf\n'), ((6026, 6038), 'tensorflow.shape', 'tf.shape', (['hr'], {}), '(hr)\n', (6034, 6038), True, 'import tensorflow as tf\n'), ((6044, 6056), 'tensorflow.shape', 'tf.shape', (['tr'], {}), '(tr)\n', (6052, 6056), True, 'import tensorflow as tf\n'), ((6222, 6248), 'tensorflow.expand_dims', 'tf.expand_dims', (['ti'], {'axis': '(1)'}), '(ti, axis=1)\n', (6236, 6248), True, 'import tensorflow as tf\n'), ((5861, 5887), 'tensorflow.expand_dims', 'tf.expand_dims', (['hr'], {'axis': '(1)'}), '(hr, axis=1)\n', (5875, 5887), True, 'import tensorflow as tf\n'), ((5891, 5917), 
'tensorflow.expand_dims', 'tf.expand_dims', (['hi'], {'axis': '(1)'}), '(hi, axis=1)\n', (5905, 5917), True, 'import tensorflow as tf\n'), ((6095, 6121), 'tensorflow.expand_dims', 'tf.expand_dims', (['hr'], {'axis': '(1)'}), '(hr, axis=1)\n', (6109, 6121), True, 'import tensorflow as tf\n'), ((6129, 6155), 'tensorflow.expand_dims', 'tf.expand_dims', (['hi'], {'axis': '(1)'}), '(hi, axis=1)\n', (6143, 6155), True, 'import tensorflow as tf\n')] |
from __future__ import with_statement
from fabric.api import run, parallel, env, hide
from utils import FabricTest, eq_
from server import server, RESPONSES
class TestParallel(FabricTest):
    """Exercises fabric's @parallel mode against the mock SSH server."""
    @server()
    @parallel
    def test_parallel(self):
        """
        Issue one simple command in parallel mode and check the canned response.
        """
        # Allow up to 10 parallel connections for this run.
        env.pool_size = 10
        cmd = "ls /simple"
        # Silence fabric's output; the mock server answers from RESPONSES.
        with hide('everything'):
            eq_(run(cmd), RESPONSES[cmd])
| [
"fabric.api.run",
"server.server",
"fabric.api.hide"
] | [((199, 207), 'server.server', 'server', ([], {}), '()\n', (205, 207), False, 'from server import server, RESPONSES\n'), ((387, 405), 'fabric.api.hide', 'hide', (['"""everything"""'], {}), "('everything')\n", (391, 405), False, 'from fabric.api import run, parallel, env, hide\n'), ((423, 431), 'fabric.api.run', 'run', (['cmd'], {}), '(cmd)\n', (426, 431), False, 'from fabric.api import run, parallel, env, hide\n')] |
from datetime import datetime, date, timedelta
import pandas as pd
import networkx as nx
from itertools import combinations
import numpy as np
class TeamworkStudyRunner:
    """Slides a fixed-length date window over the notes, yielding CareDate
    objects for each window / following care period.

    NOTE: sorts the caller's DataFrame in place (``inplace=True``).
    """
    def __init__(self, notes, window_in_days, step_in_days):
        # Sort once so window queries see chronologically ordered notes.
        notes.sort_values('date', inplace=True)
        self.notes = notes
        self.DELTA = np.timedelta64(window_in_days, 'D')
        self.STEP = np.timedelta64(step_in_days, 'D')
        first_date = notes['date'].iloc[0]
        last_date = notes['date'].iloc[-1]
        # Window start dates; stops early enough that each window fits.
        self.date_range = np.arange(first_date, last_date - self.DELTA, self.STEP)
    def __iter__(self):
        for start_date in self.date_range:
            end_date = start_date + self.DELTA
            date_of_care = end_date + self.STEP
            # '@name' in .query() refers to the local Python variables above.
            notes_in_window = self.notes.query('date >= @start_date & date <= @end_date')
            notes_for_care_date = self.notes.query('date > @end_date & date <= @date_of_care')
            num_rows = len(notes_for_care_date.index)
            # Skip windows with no notes in the following care period.
            if num_rows == 0: continue
            yield CareDate(notes_in_window, notes_for_care_date)
class CareDate:
    """Maps each discharge seen in the care period to its set of doctors,
    and yields one CareTeam per discharge."""
    def __init__(self, notes_in_window, notes_for_care_date):
        self.notes_in_window = notes_in_window
        self.notes_for_care_date = notes_for_care_date
        # discharge_id -> array of unique doctors who wrote notes for it.
        self.care_team_dict = {}
        self.__populate_care_team_dict()
    def __populate_care_team_dict(self):
        discharge_ids_for_date = self.notes_for_care_date.discharge_id.unique()
        for discharge_id in discharge_ids_for_date:
            # '@discharge_id' in .query() refers to the loop variable above.
            drs_for_discharge_id = self.notes_for_care_date.query('discharge_id == @discharge_id').dr.unique()
            self.care_team_dict[discharge_id] = drs_for_discharge_id
    def __iter__(self):
        # Build each care team's graph from the preceding window's notes.
        for discharge_id, care_team in self.care_team_dict.items():
            yield CareTeam(self.notes_in_window, discharge_id, care_team)
class CareTeam:
    """Weighted co-occurrence graph for one discharge's care team.

    Nodes are doctors; an edge's weight counts how many (date, discharge)
    combinations within the window had both doctors writing notes on the
    same discharge.
    """
    def __init__(self, notes_in_window, discharge_id, care_team):
        self.notes_in_window = notes_in_window
        self.discharge_id = discharge_id
        self.care_team = care_team
        # Canonical (sorted) doctor pairs of the tracked care team.
        self.care_team_edges = [sorted(edge) for edge in list(combinations(care_team, 2))]
        self.G = nx.Graph()
        self.unique_dates = notes_in_window.date.unique()
        self.__create_graph()
    def __create_graph(self):
        # For each date and discharge in the window, find doctor pairs that
        # co-wrote notes and belong to the tracked care team.
        for note_date in self.unique_dates:
            # '@name' in .query() refers to the local Python variables.
            notes_for_date = self.notes_in_window.query('date == @note_date')
            discharge_ids_for_date = notes_for_date.discharge_id.unique()
            for discharge_id in discharge_ids_for_date:
                drs_for_discharge_id = notes_for_date.query('discharge_id == @discharge_id').dr.unique()
                care_team_edges_for_discharge_id = [edge for edge in list(combinations(drs_for_discharge_id, 2))
                                                    if sorted(edge) in self.care_team_edges]
                for edge in care_team_edges_for_discharge_id:
                    self.__add_edge_to_G(edge)
    def __add_edge_to_G(self, edge):
        # Increment the edge weight, creating it with weight 1 the first time.
        data = self.G.get_edge_data(*edge, default=None)
        weight = 1 if data is None else data['weight'] + 1
        self.G.add_edge(*edge, weight=weight)
"numpy.timedelta64",
"itertools.combinations",
"networkx.Graph",
"numpy.arange"
] | [((330, 365), 'numpy.timedelta64', 'np.timedelta64', (['window_in_days', '"""D"""'], {}), "(window_in_days, 'D')\n", (344, 365), True, 'import numpy as np\n'), ((386, 419), 'numpy.timedelta64', 'np.timedelta64', (['step_in_days', '"""D"""'], {}), "(step_in_days, 'D')\n", (400, 419), True, 'import numpy as np\n'), ((533, 589), 'numpy.arange', 'np.arange', (['first_date', '(last_date - self.DELTA)', 'self.STEP'], {}), '(first_date, last_date - self.DELTA, self.STEP)\n', (542, 589), True, 'import numpy as np\n'), ((2234, 2244), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2242, 2244), True, 'import networkx as nx\n'), ((2188, 2214), 'itertools.combinations', 'combinations', (['care_team', '(2)'], {}), '(care_team, 2)\n', (2200, 2214), False, 'from itertools import combinations\n'), ((2799, 2836), 'itertools.combinations', 'combinations', (['drs_for_discharge_id', '(2)'], {}), '(drs_for_discharge_id, 2)\n', (2811, 2836), False, 'from itertools import combinations\n')] |
# nodenet/utilities/commons.py
# Description:
# "commons.py" provide commons utilities that can be use widely.
# Copyright 2018 NOOXY. All Rights Reserved.
from nodenet.imports.commons import *
import numpy as np2
# np2 for cupy compabable
def cut_dataset_by_ratio_ramdom(datasets, cut_ratio=0.1):
    """Randomly split ``datasets`` into a training and a validation part.

    Draws ``int(len(datasets[0]) * cut_ratio)`` examples without replacement
    for the validation split; the remaining examples stay as training data.
    Input/output rows are moved together, so pairing is preserved.

    Args:
        datasets: pair ``[inputs, outputs]`` of array-likes of equal length.
        cut_ratio (float): fraction of examples moved to the validation split.

    Returns:
        list: ``[input_train, output_train, input_valid, output_valid]`` as
        ``np`` arrays. ``np`` may be cupy (see module comment), hence the
        explicit tolist/array conversions at the boundaries.
    """
    valid_data_size = int(len(datasets[0]) * cut_ratio)
    # Work on numpy (np2) copies so np2.delete is available even when np is cupy.
    input_data = np2.array(datasets[0].tolist())
    output_data = np2.array(datasets[1].tolist())
    # Empty accumulators carrying the correct trailing dimensions.
    input_data_valid = np2.empty([0] + list(input_data.shape[1:]))
    output_data_valid = np2.empty([0] + list(output_data.shape[1:]))
    for _ in range(valid_data_size):
        # Sample one remaining row and move it to the validation arrays.
        index = np2.random.randint(len(input_data))
        input_data_valid = np2.concatenate((input_data_valid, input_data[index:index + 1]), axis=0)
        output_data_valid = np2.concatenate((output_data_valid, output_data[index:index + 1]), axis=0)
        input_data = np2.delete(input_data, index, axis=0)
        output_data = np2.delete(output_data, index, axis=0)
    # Convert back to np (possibly cupy) arrays for the caller.
    return [np.array(input_data.tolist()),
            np.array(output_data.tolist()),
            np.array(input_data_valid.tolist()),
            np.array(output_data_valid.tolist())]
def shuffle_datasets(datasets):
    """Shuffle paired input/output arrays with one shared permutation.

    Both arrays are reordered identically, so example ``i`` of the inputs
    stays aligned with example ``i`` of the outputs. Returns ``np`` arrays
    (``np`` may be cupy, hence the tolist/array round trips).
    """
    inputs = np2.array(datasets[0].tolist())
    outputs = np2.array(datasets[1].tolist())
    assert len(inputs) == len(outputs)
    permutation = np2.random.permutation(len(inputs))
    shuffled_inputs = np.array(inputs[permutation].tolist())
    shuffled_outputs = np.array(outputs[permutation].tolist())
    return [shuffled_inputs, shuffled_outputs]
def get_mini_batch_ramdom(datasets, mini_batch_size):
    """Return a random *contiguous* mini batch from ``datasets``.

    Args:
        datasets: pair ``[inputs, outputs]`` of equally sized sequences.
        mini_batch_size (int): number of consecutive examples to return.

    Returns:
        list: ``[input_batch, output_batch]`` slices of length
        ``mini_batch_size`` starting at a uniformly random offset.
    """
    input_data = datasets[0]
    output_data = datasets[1]
    rand_range = len(input_data) - mini_batch_size
    start_index = 0
    if rand_range > 0:
        # randint's upper bound is exclusive: use rand_range + 1 so the last
        # possible window (starting exactly at rand_range) can also be drawn.
        start_index = int(np.random.randint(rand_range + 1))
    return [input_data[start_index:start_index + mini_batch_size],
            output_data[start_index:start_index + mini_batch_size]]
def get_mini_batch_ramdom2(datasets, mini_batch_size):
    """Return a random mini batch sampled *without replacement*.

    Unlike ``get_mini_batch_ramdom`` the chosen examples are not contiguous:
    ``mini_batch_size`` distinct indices are drawn uniformly at random, and
    input/output rows are kept aligned.

    Args:
        datasets: pair ``[inputs, outputs]`` of equally sized arrays.
        mini_batch_size (int): number of distinct examples to return.

    Returns:
        list: ``[input_batch, output_batch]`` with matching row order.
    """
    input_data = datasets[0]
    output_data = datasets[1]
    input_data_result = np.empty([0] + list(input_data.shape[1:]))
    # BUG FIX: this accumulator previously reused input_data's trailing
    # shape, which broke whenever inputs and outputs differ in width.
    output_data_result = np.empty([0] + list(output_data.shape[1:]))
    index_list = []
    for _ in range(mini_batch_size):
        # Redraw until we hit an index not picked before.
        index = np.random.randint(len(input_data))
        while index in index_list:
            index = np.random.randint(len(input_data))
        index_list.append(index)
        input_data_result = np.concatenate((input_data_result, [input_data[index]]))
        output_data_result = np.concatenate((output_data_result, [output_data[index]]))
    return [input_data_result, output_data_result]
| [
"numpy.delete"
] | [((1010, 1047), 'numpy.delete', 'np2.delete', (['input_data', 'index'], {'axis': '(0)'}), '(input_data, index, axis=0)\n', (1020, 1047), True, 'import numpy as np2\n'), ((1070, 1108), 'numpy.delete', 'np2.delete', (['output_data', 'index'], {'axis': '(0)'}), '(output_data, index, axis=0)\n', (1080, 1108), True, 'import numpy as np2\n')] |
"""
Configuration file for the Sphinx documentation builder.
This file only contains a selection of the most common options. For a full
list see the documentation:
http://www.sphinx-doc.org/en/master/config
"""
# pylint: disable=import-error, invalid-name, redefined-builtin
import datetime as dt
import sphinx_rtd_theme
from sphinx.writers.html import HTMLTranslator
#
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'MIT Satori User Documentation'
copyright = '%s, MIT Satori Project' % dt.datetime.now().year
author = 'MIT Satori Project Team'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx_rtd_theme",
]
# Add any paths that contain templates here, relative to this directory.
##CNH# templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
##CNH# html_theme = 'sphinx_rtd_theme'
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
##CNH# html_static_path = ['_static']
##CNH# html_css_files = [
##CNH# 'css/theme_overrides.css',
##CNH# ]
##CNH# html_js_files = [
##CNH# 'js/custom.js',
##CNH# ]
html_context = {
'vcs_pageview_mode': 'edit',
'display_github': True,
'github_user': 'mit-satori', # Username
'github_repo': 'getting-started', # Repo name
'github_version': 'master', # Version
'conf_py_path': '/', # Path in the checkout to the docs root
}
# see https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html
html_theme_options = {
'canonical_url': 'https://researchcomputing.mit.edu/',
'collapse_navigation': False,
'sticky_navigation': True,
'navigation_depth': 4,
'style_external_links': True,
}
# pylint: disable=too-few-public-methods
class PatchedHTMLTranslator(HTMLTranslator):
    '''HTMLTranslator patched to open external links in new tabs.
    Taken from: 'http://jack.rosenth.al/hacking-docutils.html#sphinx-hacks'
    '''
    def visit_reference(self, node):
        '''Sets link target to '_blank' (new page tab) if link node is
        external to the site.
        '''
        # Open in a new tab when the node is explicitly marked 'newtab', or
        # when it looks external: no explicit target, not an internal
        # reference, and it does carry a 'refuri'.
        if (node.get('newtab')
                or not (node.get('target')
                        or node.get('internal')
                        or 'refuri' not in node)):
            node['target'] = '_blank'
        super().visit_reference(node)
def setup(app):
    '''Function to setup sphinx customizations.

    Registers the patched HTML translator so that external links are
    rendered with target="_blank" (new browser tab).
    '''
    app.set_translator('html', PatchedHTMLTranslator)
# globally-available substitutions
rst_prolog = r"""
.. |R| replace:: \ :sup:`®`
"""
| [
"datetime.datetime.now"
] | [((925, 942), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (940, 942), True, 'import datetime as dt\n')] |
import tensorflow as tf
import csv
import time
from datetime import timedelta
import sys
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib import slim
from tensorflow.python.ops import variables as tf_variables
from ..configuration import *
from .. import trainer, evaluator, metrics
from ..task_spec import get_task_spec
from .text_classification_dataset import TextClassificationDataset
def _load_embeddings(vocabulary_size, embeddings_size,
                     filename_prefix='embeddings', from_dir=DIR_DATA_WORD2VEC):
    """Load pre-trained word embeddings from a CSV file.

    The file name is derived from the prefix, vocabulary size and embedding
    size (e.g. ``embeddings_40000_300``) and looked up inside ``from_dir``.

    Args:
        vocabulary_size (int): number of words in the vocabulary.
        embeddings_size (int): dimensionality of each embedding vector.
        filename_prefix (str): prefix of the embeddings file name.
        from_dir (str): directory containing the embeddings file.

    Returns:
        list[list[float]]: one embedding vector per vocabulary entry, in
        file order.
    """
    embeddings_file = '{}_{}_{}'.format(filename_prefix, vocabulary_size, embeddings_size)
    with open(os.path.join(from_dir, embeddings_file), 'r') as file:
        reader = csv.reader(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        # Nested comprehension replaces the manual append loop.
        return [[float(value) for value in row] for row in reader]
class TextClassificationTrainer(trainer.Trainer):
    """
    Helper class to run the training and create the model for the training. See trainer.Trainer for
    more details.
    """
    def __init__(self, dataset, text_classification_model, log_dir=DIR_TC_LOGDIR,
                 use_end_sequence=False, task_spec=None, max_steps=None):
        """Configure the trainer.

        Args:
            dataset: dataset providing the training tensors.
            text_classification_model: model object that builds graph/loss/metrics.
            log_dir (str): directory for checkpoints and summaries.
            use_end_sequence (bool): whether the end of the text sequence is
                also fed to the model.
            task_spec: distributed-training task description, or None.
            max_steps (int): optional hard limit on training steps.
        """
        self.text_classification_model = text_classification_model
        self.use_end_sequence = use_end_sequence
        config = tf.ConfigProto()
        # Grow GPU memory on demand instead of reserving it all upfront.
        config.gpu_options.allow_growth = True
        super(TextClassificationTrainer, self).__init__(log_dir=log_dir, dataset=dataset,
                                                        task_spec=task_spec, max_steps=max_steps,
                                                        monitored_training_session_config=config)
    def model(self, input_text_begin, input_text_end, gene, variation, expected_labels, batch_size,
              vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
        """Build the training graph: model outputs, loss, optimizer, metrics."""
        # embeddings
        embeddings = _load_embeddings(vocabulary_size, embeddings_size)
        # global step
        self.global_step = training_util.get_or_create_global_step()
        # model
        with slim.arg_scope(self.text_classification_model.model_arg_scope()):
            outputs = self.text_classification_model.model(input_text_begin, input_text_end,
                                                           gene, variation, output_classes,
                                                           embeddings=embeddings,
                                                           batch_size=batch_size)
        # loss
        targets = self.text_classification_model.targets(expected_labels, output_classes)
        self.loss = self.text_classification_model.loss(targets, outputs)
        tf.summary.scalar('loss', self.loss)
        # learning rate (may be None when the optimizer uses a fixed rate)
        self.optimizer, self.learning_rate = \
            self.text_classification_model.optimize(self.loss, self.global_step)
        if self.learning_rate is not None:
            tf.summary.scalar('learning_rate', self.learning_rate)
        # metrics
        self.metrics = metrics.single_label(outputs['prediction'], targets)
        # saver to save the model
        self.saver = tf.train.Saver()
        # check a nan value in the loss
        self.loss = tf.check_numerics(self.loss, 'loss is nan')
        return None
    def create_graph(self, dataset_tensor, batch_size):
        """Unpack the dataset tensors and build the training graph."""
        input_text_begin, input_text_end, gene, variation, expected_labels = dataset_tensor
        if not self.use_end_sequence:
            input_text_end = None
        return self.model(input_text_begin, input_text_end, gene, variation, expected_labels, batch_size)
    def step(self, session, graph_data):
        """Run one optimization step; the chief logs progress every 5 minutes."""
        # NOTE(review): the local name `metrics` shadows the imported module.
        lr, _, loss, step, metrics = \
            session.run([self.learning_rate, self.optimizer, self.loss, self.global_step,
                         self.metrics])
        if self.is_chief and time.time() > self.print_timestamp + 5 * 60:
            self.print_timestamp = time.time()
            elapsed_time = str(timedelta(seconds=time.time() - self.init_time))
            m = 'step: {} loss: {:0.4f} learning_rate = {:0.6f} elapsed seconds: {} ' \
                'precision: {} recall: {} accuracy: {}'
            logging.info(m.format(step, loss, lr, elapsed_time,
                                  metrics['precision'], metrics['recall'], metrics['accuracy']))
    def after_create_session(self, session, coord):
        """Record session creation time; used for elapsed-time logging."""
        self.init_time = time.time()
        self.print_timestamp = time.time()
class TextClassificationTest(evaluator.Evaluator):
    """Evaluator for distributed training"""
    def __init__(self, dataset, text_classification_model, output_path, log_dir=DIR_TC_LOGDIR,
                 use_end_sequence=False,max_steps=None):
        """Configure the evaluator.

        Args:
            dataset: dataset providing the evaluation tensors.
            text_classification_model: model object that builds graph/loss/metrics.
            output_path (str): where evaluation results are written.
            log_dir (str): directory holding the checkpoints to evaluate.
            use_end_sequence (bool): whether the end of the text sequence is used.
            max_steps (int): optional limit on evaluation steps.
        """
        self.use_end_sequence = use_end_sequence
        config = tf.ConfigProto()
        # Grow GPU memory on demand instead of reserving it all upfront.
        config.gpu_options.allow_growth = True
        super(TextClassificationTest, self).__init__(checkpoints_dir=log_dir, dataset=dataset,
                                                     output_path=output_path, max_steps=max_steps,
                                                     singular_monitored_session_config=config)
        self.text_classification_model = text_classification_model
        self.eval_writer = tf.summary.FileWriter(log_dir)
    def model(self, input_text_begin, input_text_end, gene, variation, expected_labels, batch_size,
              vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
        """Build the evaluation graph; loss is averaged over all eval steps."""
        # embeddings
        embeddings = _load_embeddings(vocabulary_size, embeddings_size)
        # model
        with slim.arg_scope(self.text_classification_model.model_arg_scope()):
            outputs = self.text_classification_model.model(input_text_begin, input_text_end,
                                                           gene, variation, output_classes,
                                                           embeddings=embeddings,
                                                           batch_size=batch_size,
                                                           training=False)
        # loss
        targets = self.text_classification_model.targets(expected_labels, output_classes)
        loss = self.text_classification_model.loss(targets, outputs)
        # Running mean of the loss: accumulated_loss / number of eval steps.
        self.accumulated_loss = tf.Variable(0.0, dtype=tf.float32, name='accumulated_loss',
                                            trainable=False)
        self.accumulated_loss = tf.assign_add(self.accumulated_loss, loss)
        step = tf.Variable(0, dtype=tf.int32, name='eval_step', trainable=False)
        step_increase = tf.assign_add(step, 1)
        self.loss = self.accumulated_loss / tf.cast(step_increase, dtype=tf.float32)
        tf.summary.scalar('loss', self.loss)
        # metrics
        self.metrics = metrics.single_label(outputs['prediction'], targets, moving_average=False)
        return None
    def create_graph(self, dataset_tensor, batch_size):
        """Unpack the dataset tensors and build the evaluation graph."""
        input_text_begin, input_text_end, gene, variation, expected_labels = dataset_tensor
        if not self.use_end_sequence:
            input_text_end = None
        graph_data = self.model(input_text_begin, input_text_end, gene, variation,
                                expected_labels, batch_size)
        return graph_data
    def step(self, session, graph_data, summary_op):
        """Run one evaluation step, keeping the latest loss and metrics."""
        summary, self.loss_result, self.metrics_results = \
            session.run([summary_op, self.loss, self.metrics])
        return summary
    def end(self, session):
        """Log the final loss and metrics for the evaluated checkpoint."""
        super(TextClassificationTest, self).end(session)
        chk_step = int(self.lastest_checkpoint.split('-')[-1])
        m = 'step: {} loss: {:0.4f} precision: {} recall: {} accuracy: {}'
        logging.info(m.format(chk_step, self.loss_result, self.metrics_results['precision'],
                              self.metrics_results['recall'], self.metrics_results['accuracy']))
    def after_create_session(self, session, coord):
        # The commented-out block below looks like a one-off checkpoint
        # recovery hack; kept for reference.
        # checkpoints_file = os.path.join(self.checkpoints_dir, 'checkpoint')
        # alt_checkpoints_dir = '{}_tp'.format(self.checkpoints_dir)
        # import glob
        # files = glob.glob('{}/model.ckpt-*.data-*'.format(alt_checkpoints_dir))
        # chk_step = 0
        # for f in files:
        #     num = f.split('model.ckpt-')[1].split('.')[0]
        #     num = int(num)
        #     if chk_step == 0 or num < chk_step:
        #         chk_step = num
        # if chk_step != 0:
        #     ckpt_files = glob.glob('{}/model.ckpt-{}.data-*'.format(alt_checkpoints_dir, chk_step))
        #     ckpt_files = [x.split('/')[-1] for x in ckpt_files]
        #     for f in ckpt_files + ['model.ckpt-{}.index', 'model.ckpt-{}.meta']:
        #         f = f.format(chk_step)
        #         os.rename(os.path.join(alt_checkpoints_dir, f), os.path.join(self.checkpoints_dir, f))
        #     with open(checkpoints_file, 'wb') as f:
        #         f.write('model_checkpoint_path: "./model.ckpt-{}"\n'.format(chk_step))
        #         f.write('all_model_checkpoint_paths: "./model.ckpt-{}"\n'.format(chk_step))
        super(TextClassificationTest, self).after_create_session(session, coord)
        # with open(checkpoints_file, 'wb') as f:
        #     f.write('model_checkpoint_path: "./model.ckpt-"\n')
        #     f.write('all_model_checkpoint_paths: "./model.ckpt-"\n')
class TextClassificationEval(evaluator.Evaluator):
    """Evaluator for text classification.

    Runs a single pass over the dataset and prints, per example, the id and
    the 9 smoothed/normalized class probabilities in submission CSV format.
    """
    def __init__(self, dataset, text_classification_model, output_path, log_dir=DIR_TC_LOGDIR,
                 use_end_sequence=False):
        """Configure the evaluator (single pass, GPU memory growth enabled).

        Args:
            dataset: dataset providing the evaluation tensors.
            text_classification_model: model object that builds the graph.
            output_path (str): where results are written.
            log_dir (str): directory holding the checkpoints to restore.
            use_end_sequence (bool): whether the end of the text sequence is used.
        """
        self.use_end_sequence = use_end_sequence
        config = tf.ConfigProto()
        # Grow GPU memory on demand instead of reserving it all upfront.
        config.gpu_options.allow_growth = True
        super(TextClassificationEval, self).__init__(checkpoints_dir=log_dir,
                                                     output_path=output_path,
                                                     infinite_loop=False,
                                                     singular_monitored_session_config=config)
        self.dataset = dataset
        self.text_classification_model = text_classification_model
    def model(self, input_text_begin, input_text_end, gene, variation, batch_size,
              vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
        """Build the inference graph (no labels, no loss)."""
        # embeddings
        embeddings = _load_embeddings(vocabulary_size, embeddings_size)
        # global step, incremented once per evaluated batch
        self.global_step = training_util.get_or_create_global_step()
        self.global_step = tf.assign_add(self.global_step, 1)
        # model
        with tf.control_dependencies([self.global_step]):
            with slim.arg_scope(self.text_classification_model.model_arg_scope()):
                self.outputs = self.text_classification_model.model(input_text_begin, input_text_end,
                                                                    gene, variation, output_classes,
                                                                    embeddings=embeddings,
                                                                    batch_size=batch_size,
                                                                    training=False)
        # restore only the trainable variables
        self.saver = tf.train.Saver(var_list=tf_variables.trainable_variables())
        return self.outputs
    def create_graph(self, dataset_tensor, batch_size):
        """Unpack the dataset tensors and build the inference graph."""
        input_text_begin, input_text_end, gene, variation = dataset_tensor
        if not self.use_end_sequence:
            input_text_end = None
        return self.model(input_text_begin, input_text_end, gene, variation, batch_size)
    def after_create_session(self, session, coord):
        """Print the CSV header before the per-example rows."""
        super(TextClassificationEval, self).after_create_session(session, coord)
        print('ID,class1,class2,class3,class4,class5,class6,class7,class8,class9')
    def step(self, session, graph_data, summary_op):
        """Print one CSV row with the smoothed, normalized class probabilities."""
        step, predictions = session.run([self.global_step, self.outputs['prediction']])
        predictions = predictions[0]
        predictions = [p + 0.01 for p in predictions]  # penalize less the mistakes
        # Renamed from `sum`, which shadowed the builtin of the same name.
        total = np.sum(predictions)
        predictions = [p / total for p in predictions]
        print('{},{}'.format(step, ','.join(['{:.3f}'.format(x) for x in predictions])))
        return None
import logging
def main(model, name, sentence_split=False, end_sequence=USE_END_SEQUENCE, batch_size=TC_BATCH_SIZE):
    """
    Main method to execute the text_classification models.
    The first command line argument selects the mode: 'test', 'validate',
    'eval', 'eval_stage2', or (by default) distributed training.
    :param ModelSimple model: object model based on ModelSimple
    :param str name: name of the model
    :param bool sentence_split: whether to split the dataset in sentences or not,
    only used for hatt model
    :param bool end_sequence: whether to use or not the end of the sequences in the models
    :param int batch_size: batch size of the models
    """
    logging.getLogger().setLevel(logging.INFO)
    # Each model gets its own log/checkpoint directory.
    log_dir = '{}_{}'.format(DIR_TC_LOGDIR, name)
    if len(sys.argv) > 1 and sys.argv[1] == 'test':
        # execute the test with the train dataset
        dataset = TextClassificationDataset(type='train', sentence_split=sentence_split)
        tester = TextClassificationTest(dataset=dataset, text_classification_model=model,
                                        log_dir=log_dir,
                                        output_path=os.path.join(log_dir, 'test_trainset'),
                                        use_end_sequence=end_sequence)
        tester.run()
    elif len(sys.argv) > 1 and sys.argv[1] == 'validate':
        # evaluate on the validation split
        dataset = TextClassificationDataset(type='val', sentence_split=sentence_split)
        tester = TextClassificationTest(dataset=dataset, text_classification_model=model,
                                        log_dir=log_dir,
                                        output_path=os.path.join(log_dir, 'validate'),
                                        use_end_sequence=end_sequence)
        tester.run()
    elif len(sys.argv) > 1 and sys.argv[1] == 'eval':
        # evaluate the data of the test dataset. We submit this output to kaggle
        dataset = TextClassificationDataset(type='test', sentence_split=sentence_split)
        evaluator = TextClassificationEval(dataset=dataset, text_classification_model=model,
                                          log_dir=log_dir,
                                          output_path=os.path.join(log_dir, 'test'),
                                          use_end_sequence=end_sequence)
        evaluator.run()
    elif len(sys.argv) > 1 and sys.argv[1] == 'eval_stage2':
        # evaluate the data of the test dataset. We submit this output to kaggle
        dataset = TextClassificationDataset(type='stage2_test', sentence_split=sentence_split)
        evaluator = TextClassificationEval(dataset=dataset, text_classification_model=model,
                                          log_dir=log_dir,
                                          output_path=os.path.join(log_dir, 'test_stage2'),
                                          use_end_sequence=end_sequence)
        evaluator.run()
    else:
        # training
        task_spec = get_task_spec(with_evaluator=USE_LAST_WORKER_FOR_VALIDATION)
        if task_spec.join_if_ps():
            # join if it is a parameters server and do nothing else
            return
        # derive max_steps from the train-set size and the epoch count
        with(tf.gfile.Open(os.path.join(DIR_DATA_TEXT_CLASSIFICATION, 'train_set'))) as f:
            max_steps = int(TC_EPOCHS * len(f.readlines()) / batch_size)
        if task_spec.is_evaluator():
            dataset = TextClassificationDataset(type='val', sentence_split=sentence_split)
            # evaluator running in the last worker
            tester = TextClassificationTest(dataset=dataset, text_classification_model=model,
                                            log_dir=log_dir,
                                            output_path=os.path.join(log_dir, 'val'),
                                            use_end_sequence=end_sequence,
                                            max_steps=max_steps)
            tester.run()
        else:
            dataset = TextClassificationDataset(type='train', sentence_split=sentence_split)
            trainer = TextClassificationTrainer(dataset=dataset, text_classification_model=model,
                                              log_dir=log_dir, use_end_sequence=end_sequence,
                                              task_spec=task_spec, max_steps=max_steps)
            trainer.run(epochs=TC_EPOCHS, batch_size=batch_size)
| [
"tensorflow.cast",
"logging.getLogger",
"tensorflow.python.training.training_util.get_or_create_global_step",
"tensorflow.check_numerics",
"tensorflow.Variable",
"tensorflow.train.Saver",
"numpy.sum",
"tensorflow.control_dependencies",
"tensorflow.assign_add",
"time.time",
"tensorflow.python.ops... | [((769, 842), 'csv.reader', 'csv.reader', (['file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(file, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (779, 842), False, 'import csv\n'), ((1423, 1439), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1437, 1439), True, 'import tensorflow as tf\n'), ((2116, 2157), 'tensorflow.python.training.training_util.get_or_create_global_step', 'training_util.get_or_create_global_step', ([], {}), '()\n', (2155, 2157), False, 'from tensorflow.python.training import training_util\n'), ((2791, 2827), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'self.loss'], {}), "('loss', self.loss)\n", (2808, 2827), True, 'import tensorflow as tf\n'), ((3242, 3258), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3256, 3258), True, 'import tensorflow as tf\n'), ((3319, 3362), 'tensorflow.check_numerics', 'tf.check_numerics', (['self.loss', '"""loss is nan"""'], {}), "(self.loss, 'loss is nan')\n", (3336, 3362), True, 'import tensorflow as tf\n'), ((4512, 4523), 'time.time', 'time.time', ([], {}), '()\n', (4521, 4523), False, 'import time\n'), ((4555, 4566), 'time.time', 'time.time', ([], {}), '()\n', (4564, 4566), False, 'import time\n'), ((4884, 4900), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (4898, 4900), True, 'import tensorflow as tf\n'), ((5331, 5361), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['log_dir'], {}), '(log_dir)\n', (5352, 5361), True, 'import tensorflow as tf\n'), ((6380, 6456), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'dtype': 'tf.float32', 'name': '"""accumulated_loss"""', 'trainable': '(False)'}), "(0.0, dtype=tf.float32, name='accumulated_loss', trainable=False)\n", (6391, 6456), True, 'import tensorflow as tf\n'), ((6533, 6575), 'tensorflow.assign_add', 'tf.assign_add', (['self.accumulated_loss', 'loss'], {}), '(self.accumulated_loss, 
loss)\n', (6546, 6575), True, 'import tensorflow as tf\n'), ((6591, 6656), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'dtype': 'tf.int32', 'name': '"""eval_step"""', 'trainable': '(False)'}), "(0, dtype=tf.int32, name='eval_step', trainable=False)\n", (6602, 6656), True, 'import tensorflow as tf\n'), ((6681, 6703), 'tensorflow.assign_add', 'tf.assign_add', (['step', '(1)'], {}), '(step, 1)\n', (6694, 6703), True, 'import tensorflow as tf\n'), ((6797, 6833), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'self.loss'], {}), "('loss', self.loss)\n", (6814, 6833), True, 'import tensorflow as tf\n'), ((9735, 9751), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (9749, 9751), True, 'import tensorflow as tf\n'), ((10547, 10588), 'tensorflow.python.training.training_util.get_or_create_global_step', 'training_util.get_or_create_global_step', ([], {}), '()\n', (10586, 10588), False, 'from tensorflow.python.training import training_util\n'), ((10616, 10650), 'tensorflow.assign_add', 'tf.assign_add', (['self.global_step', '(1)'], {}), '(self.global_step, 1)\n', (10629, 10650), True, 'import tensorflow as tf\n'), ((12220, 12239), 'numpy.sum', 'np.sum', (['predictions'], {}), '(predictions)\n', (12226, 12239), True, 'import numpy as np\n'), ((3036, 3090), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'self.learning_rate'], {}), "('learning_rate', self.learning_rate)\n", (3053, 3090), True, 'import tensorflow as tf\n'), ((4031, 4042), 'time.time', 'time.time', ([], {}), '()\n', (4040, 4042), False, 'import time\n'), ((6748, 6788), 'tensorflow.cast', 'tf.cast', (['step_increase'], {'dtype': 'tf.float32'}), '(step_increase, dtype=tf.float32)\n', (6755, 6788), True, 'import tensorflow as tf\n'), ((10680, 10723), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[self.global_step]'], {}), '([self.global_step])\n', (10703, 10723), True, 'import tensorflow as tf\n'), ((12958, 12977), 
'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (12975, 12977), False, 'import logging\n'), ((3951, 3962), 'time.time', 'time.time', ([], {}), '()\n', (3960, 3962), False, 'import time\n'), ((11369, 11403), 'tensorflow.python.ops.variables.trainable_variables', 'tf_variables.trainable_variables', ([], {}), '()\n', (11401, 11403), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((4092, 4103), 'time.time', 'time.time', ([], {}), '()\n', (4101, 4103), False, 'import time\n')] |
import click
from retrieval.elastic_retriever import ElasticRetriever
import os
@click.command()
@click.option('--sections-parquet', type=str, help='', default='')
@click.option('--documents-parquet', type=str, help='', default='')
@click.option('--tables-parquet', type=str, help='', default='')
@click.option('--figures-parquet', type=str, help='', default='')
@click.option('--equations-parquet', type=str, help='', default='')
@click.option('--entities-parquet', type=str, help='', default='')
@click.option('--aws-host', type=str, help='', default='')
@click.option('--host', type=str, help='', default='localhost')
def run(sections_parquet, documents_parquet, tables_parquet, figures_parquet, equations_parquet, entities_parquet, aws_host, host):
    """
    Build Elasticsearch indices from the given parquet files.

    When --aws-host is given, connects to AWS-hosted Elasticsearch on port
    443 with credentials read from the standard AWS_* environment
    variables; otherwise connects to a plain Elasticsearch on --host.
    """
    if aws_host != '':
        # NOTE(review): AWS4Auth is never imported in this module, so this
        # branch raises NameError as written; it needs
        # `from requests_aws4auth import AWS4Auth`.
        auth = AWS4Auth(os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY'), os.environ.get('AWS_DEFAULT_REGION'), 'es', session_token=os.environ.get('AWS_SESSION_TOKEN'))
        ret = ElasticRetriever(hosts=[{'host':aws_host, 'port':443}], awsauth=auth)
    else:
        ret = ElasticRetriever(hosts=[host])
    print('Connected to retriever, building indices')
    ret.build_index(documents_parquet, entities_parquet, sections_parquet, tables_parquet, figures_parquet, equations_parquet)
    print('Done building index')
if __name__ == '__main__':
    run()
| [
"click.option",
"click.command",
"retrieval.elastic_retriever.ElasticRetriever",
"os.environ.get"
] | [((82, 97), 'click.command', 'click.command', ([], {}), '()\n', (95, 97), False, 'import click\n'), ((99, 164), 'click.option', 'click.option', (['"""--sections-parquet"""'], {'type': 'str', 'help': '""""""', 'default': '""""""'}), "('--sections-parquet', type=str, help='', default='')\n", (111, 164), False, 'import click\n'), ((166, 232), 'click.option', 'click.option', (['"""--documents-parquet"""'], {'type': 'str', 'help': '""""""', 'default': '""""""'}), "('--documents-parquet', type=str, help='', default='')\n", (178, 232), False, 'import click\n'), ((234, 297), 'click.option', 'click.option', (['"""--tables-parquet"""'], {'type': 'str', 'help': '""""""', 'default': '""""""'}), "('--tables-parquet', type=str, help='', default='')\n", (246, 297), False, 'import click\n'), ((299, 363), 'click.option', 'click.option', (['"""--figures-parquet"""'], {'type': 'str', 'help': '""""""', 'default': '""""""'}), "('--figures-parquet', type=str, help='', default='')\n", (311, 363), False, 'import click\n'), ((365, 431), 'click.option', 'click.option', (['"""--equations-parquet"""'], {'type': 'str', 'help': '""""""', 'default': '""""""'}), "('--equations-parquet', type=str, help='', default='')\n", (377, 431), False, 'import click\n'), ((433, 498), 'click.option', 'click.option', (['"""--entities-parquet"""'], {'type': 'str', 'help': '""""""', 'default': '""""""'}), "('--entities-parquet', type=str, help='', default='')\n", (445, 498), False, 'import click\n'), ((500, 557), 'click.option', 'click.option', (['"""--aws-host"""'], {'type': 'str', 'help': '""""""', 'default': '""""""'}), "('--aws-host', type=str, help='', default='')\n", (512, 557), False, 'import click\n'), ((559, 621), 'click.option', 'click.option', (['"""--host"""'], {'type': 'str', 'help': '""""""', 'default': '"""localhost"""'}), "('--host', type=str, help='', default='localhost')\n", (571, 621), False, 'import click\n'), ((988, 1059), 'retrieval.elastic_retriever.ElasticRetriever', 
'ElasticRetriever', ([], {'hosts': "[{'host': aws_host, 'port': 443}]", 'awsauth': 'auth'}), "(hosts=[{'host': aws_host, 'port': 443}], awsauth=auth)\n", (1004, 1059), False, 'from retrieval.elastic_retriever import ElasticRetriever\n'), ((1082, 1112), 'retrieval.elastic_retriever.ElasticRetriever', 'ElasticRetriever', ([], {'hosts': '[host]'}), '(hosts=[host])\n', (1098, 1112), False, 'from retrieval.elastic_retriever import ElasticRetriever\n'), ((801, 836), 'os.environ.get', 'os.environ.get', (['"""AWS_ACCESS_KEY_ID"""'], {}), "('AWS_ACCESS_KEY_ID')\n", (815, 836), False, 'import os\n'), ((838, 877), 'os.environ.get', 'os.environ.get', (['"""AWS_SECRET_ACCESS_KEY"""'], {}), "('AWS_SECRET_ACCESS_KEY')\n", (852, 877), False, 'import os\n'), ((879, 915), 'os.environ.get', 'os.environ.get', (['"""AWS_DEFAULT_REGION"""'], {}), "('AWS_DEFAULT_REGION')\n", (893, 915), False, 'import os\n'), ((937, 972), 'os.environ.get', 'os.environ.get', (['"""AWS_SESSION_TOKEN"""'], {}), "('AWS_SESSION_TOKEN')\n", (951, 972), False, 'import os\n')] |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import tqdm
from data import dataset as dset
import torchvision.models as tmodels
import tqdm
from models import models
import os
import itertools
import glob
from utils import utils
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
import argparse
# Command-line configuration: dataset/model selection, regularizer weights
# and optimization hyper-parameters for the evaluation script below.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='mitstates', help='mitstates|zappos')
parser.add_argument('--data_dir', default='data/mit-states/', help='data root dir')
parser.add_argument('--cv_dir', default='cv/tmp/', help='dir to save checkpoints to')
parser.add_argument('--load', default=None, help='path to checkpoint to load from')
# model parameters
parser.add_argument('--model', default='visprodNN', help='visprodNN|redwine|labelembed+|attributeop')
parser.add_argument('--emb_dim', type=int, default=300, help='dimension of common embedding space')
parser.add_argument('--nlayers', type=int, default=2, help='number of layers for labelembed+')
parser.add_argument('--glove_init', action='store_true', default=False, help='initialize inputs with word vectors')
parser.add_argument('--clf_init', action='store_true', default=False, help='initialize inputs with SVM weights')
parser.add_argument('--static_inp', action='store_true', default=False, help='do not optimize input representations')
# regularizers (loss-term weights)
parser.add_argument('--lambda_aux', type=float, default=0.0)
parser.add_argument('--lambda_inv', type=float, default=0.0)
parser.add_argument('--lambda_comm', type=float, default=0.0)
parser.add_argument('--lambda_ant', type=float, default=0.0)
# optimization
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--wd', type=float, default=5e-5)
parser.add_argument('--save_every', type=int, default=100)
parser.add_argument('--eval_val_every', type=int, default=20)
parser.add_argument('--max_epochs', type=int, default=1000)
args = parser.parse_args()
def test(epoch):
    """
    Evaluate the module-level `model` over `testloader` and print the
    attribute / object / closed / open / object-oracle accuracies.

    Relies on module-level globals: model, testloader, evaluator.
    """
    model.eval()
    accuracies = []
    for idx, data in tqdm.tqdm(enumerate(testloader), total=len(testloader)):
        data = [d.cuda() for d in data]
        _, predictions = model(data)
        # per the unpacking here: data[1] holds attribute truth,
        # data[2] holds object truth
        attr_truth, obj_truth = data[1], data[2]
        results = evaluator.score_model(predictions, obj_truth)
        match_stats = evaluator.evaluate_predictions(results, attr_truth, obj_truth)
        accuracies.append(match_stats)
    # Transpose per-batch stat tuples into per-stat sequences, then
    # concatenate and average each one.  Assumes each match_stats is a
    # 5-tuple of tensors (matches the unpacking below) -- TODO confirm
    # against evaluator.evaluate_predictions.
    accuracies = zip(*accuracies)
    accuracies = map(torch.mean, map(torch.cat, accuracies))
    attr_acc, obj_acc, closed_acc, open_acc, objoracle_acc = accuracies
    print ('(test) E: %d | A: %.3f | O: %.3f | Cl: %.3f | Op: %.4f | OrO: %.4f'%(epoch, attr_acc, obj_acc, closed_acc, open_acc, objoracle_acc))
#----------------------------------------------------------------#
# Module-level evaluation driver: load the test split, build the selected
# model, restore the checkpoint and run a single evaluation pass.
testset = dset.CompositionDatasetActivations(root=args.data_dir, phase='test', split='compositional-split')
testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=2)
# Select the model implementation named by --model.
# NOTE(review): an unrecognized --model value leaves `model` unbound and the
# model.cuda() call below raises NameError.
if args.model == 'visprodNN':
    model = models.VisualProductNN(testset, args)
elif args.model == 'redwine':
    model = models.RedWine(testset, args)
elif args.model =='labelembed+':
    model = models.LabelEmbedPlus(testset, args)
elif args.model =='attributeop':
    model = models.AttributeOperator(testset, args)
model.cuda()
evaluator = models.Evaluator(testset, model)
# Restore weights from --load; a missing --load (default None) fails here.
checkpoint = torch.load(args.load)
model.load_state_dict(checkpoint['net'])
start_epoch = checkpoint['epoch']
print ('loaded model from', os.path.basename(args.load))
with torch.no_grad():
    test(0)
| [
"data.dataset.CompositionDatasetActivations",
"models.models.Evaluator",
"argparse.ArgumentParser",
"models.models.VisualProductNN",
"torch.load",
"models.models.LabelEmbedPlus",
"models.models.AttributeOperator",
"models.models.RedWine",
"os.path.basename",
"torch.utils.data.DataLoader",
"torch... | [((386, 411), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (409, 411), False, 'import argparse\n'), ((2913, 3015), 'data.dataset.CompositionDatasetActivations', 'dset.CompositionDatasetActivations', ([], {'root': 'args.data_dir', 'phase': '"""test"""', 'split': '"""compositional-split"""'}), "(root=args.data_dir, phase='test', split=\n 'compositional-split')\n", (2947, 3015), True, 'from data import dataset as dset\n'), ((3024, 3123), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(testset, batch_size=args.batch_size, shuffle=\n False, num_workers=2)\n', (3051, 3123), False, 'import torch\n'), ((3465, 3497), 'models.models.Evaluator', 'models.Evaluator', (['testset', 'model'], {}), '(testset, model)\n', (3481, 3497), False, 'from models import models\n'), ((3512, 3533), 'torch.load', 'torch.load', (['args.load'], {}), '(args.load)\n', (3522, 3533), False, 'import torch\n'), ((3162, 3199), 'models.models.VisualProductNN', 'models.VisualProductNN', (['testset', 'args'], {}), '(testset, args)\n', (3184, 3199), False, 'from models import models\n'), ((3637, 3664), 'os.path.basename', 'os.path.basename', (['args.load'], {}), '(args.load)\n', (3653, 3664), False, 'import os\n'), ((3672, 3687), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3685, 3687), False, 'import torch\n'), ((3242, 3271), 'models.models.RedWine', 'models.RedWine', (['testset', 'args'], {}), '(testset, args)\n', (3256, 3271), False, 'from models import models\n'), ((3317, 3353), 'models.models.LabelEmbedPlus', 'models.LabelEmbedPlus', (['testset', 'args'], {}), '(testset, args)\n', (3338, 3353), False, 'from models import models\n'), ((3399, 3438), 'models.models.AttributeOperator', 'models.AttributeOperator', (['testset', 'args'], {}), '(testset, args)\n', (3423, 3438), False, 'from models import models\n')] |
# -*- coding: utf-8 -*-
from flask_restful import Api
from resources.person import PersonResource
from resources.company import CompanyResource
#Define app end points
def get_endpoints(app):
    """Attach the REST resources to *app* and return the configured Api."""
    api = Api(app)
    routes = (
        (PersonResource, ('/people', '/people/<string:username>')),
        (CompanyResource, ('/company/<string:name>',)),
    )
    for resource, urls in routes:
        api.add_resource(resource, *urls)
    return api
| [
"flask_restful.Api"
] | [((203, 211), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (206, 211), False, 'from flask_restful import Api\n')] |
# Notes
# This module not really designed for general purpose use. I wrote this as a study mechanism for
# decomposition algorithms. The codes are not well tested and may be naive.
#
# lapack working notes at http://www.netlib.org/lapack/lawns/
import math

import pyJvsip as pv
def eye(t, n):
    """
    I = eye(t, n)
    Create and return an n-by-n identity matrix of view type t.
    t must be a matrix view type string.
    """
    m = pv.create(t, n, n)
    return m.identity
def sign(a_in):
    """
    Return the sign of a real or complex scalar (LAPACK Working Note 148).

    For real a: -1.0 if a < 0.0 else 1.0 (sign(0) is defined as 1.0).
    For complex a with nonzero imaginary part: a/|a| (the unit phase).
    Integer input is promoted to float.  Non-scalar input prints a message
    and returns None (legacy behavior, preserved).
    """
    a = float(a_in) if type(a_in) is int else a_in
    if type(a) is not float and type(a) is not complex:
        print('sign function only works on scalars')
        return
    # math.hypot replaces pv.vsip_hypot_d; for float a, a.imag == 0.0.
    t = math.hypot(a.real, a.imag)
    if t == 0.0:
        return 1.0
    if a.imag == 0.0:
        return -1.0 if a.real < 0.0 else 1.0
    return a / t
# householder routines
def houseVector(x):
    """
    v = houseVector(x)
    Return the normalized householder vector v for input vector view x so
    that the householder projector is H = I - 2 v v*.
    Prints a message and returns None when x is not a vector view.
    """
    if 'vview' not in x.type:
        print('Function houseVector only works on vector views')
        return
    v = x.copy
    v[0] += sign(x[0]) * x.norm2
    nrm = v.norm2
    if nrm != 0.0:
        v /= nrm
    else:
        v[0] = 1.0
    return v
def house(v):
    """
    H = house(v)
    Build and return the householder projector H = I - (2/(v^H v)) v v^H
    for householder vector v.  Works for any valid householder vector.
    """
    mat_type = {'vview_f': 'mview_f', 'vview_d': 'mview_d',
                'cvview_f': 'cmview_f', 'cvview_d': 'cmview_d'}[v.type]
    beta = 2.0 / v.jdot(v)
    return eye(mat_type, v.length) - v.outer(beta, v)
def houseProd(v,A):
    """
    Usage:
        houseProd(v,A)
    Apply the householder projector H built from vector v to A from the
    left, computing A = H A in place (there are create/destroy penalties
    with this function).  A convenience reference to A is returned.
    """
    beta = 2.0/v.jdot(v)
    # The bare 'v.conj' expressions conjugate v in place (pyJvsip property
    # with a side effect); the second restores v.  Between them,
    # w = conj(conj(v).prod(A)) -- presumably so that outer() below (which
    # appears to conjugate its vector argument) yields
    # A -= beta * v * (v^H A).  NOTE(review): confirm against pyJvsip docs.
    v.conj;w=v.prod(A).conj;v.conj
    A -= v.outer(beta,w)
    return A
def prodHouse(A,v):
    """
    Usage:
        prodHouse(A,v)
    Apply the householder projector H built from vector v to A from the
    right, computing A = A H in place (there are create/destroy penalties
    with this function).  A convenience reference to A is returned.
    """
    beta = 2.0/v.jdot(v)
    # w = A v; then A -= beta * w outer v, i.e. A -= beta (A v) v^H
    # (presumably outer() conjugates its second argument --
    # NOTE(review): confirm against pyJvsip docs).
    w=A.prod(v)
    A-=w.outer(beta,v)
    return A
#Givens
def givensCoef(x1_in, x2_in):
    """
    (c, s, r) = givensCoef(x1, x2)
    Compute Givens rotation parameters such that
    [c s; -conj(s) c] [x1; x2] = [r; 0].
    Adapted from Algorithm 1 of LAPACK Working Note 148.  Integer input is
    promoted to float; unsupported types print a message and return None
    (legacy behavior, preserved).
    """
    x1 = float(x1_in) if type(x1_in) is int else x1_in
    x2 = float(x2_in) if type(x2_in) is int else x2_in
    if type(x1) is float and type(x2) is float:
        t = math.hypot(x1, x2)  # was pv.vsip_hypot_d
        if x2 == 0.0:
            return (1.0, 0.0, x1)
        if x1 == 0.0:
            return (0.0, sign(x2), t)
        sn = sign(x1)
        # abs() is the stdlib equivalent of pv.vsip_mag_d for floats
        return (abs(x1) / t, sn * x2 / t, sn * t)
    if type(x1) is complex or type(x2) is complex:
        mx1 = math.hypot(x1.real, x1.imag)
        mx2 = math.hypot(x2.real, x2.imag)
        if mx2 == 0.0:
            return (1.0, 0.0, x1)
        if mx1 == 0.0:
            return (0, sign(x2.conjugate()), mx2)
        t = math.hypot(mx1, mx2)
        c = mx1 / t
        sn = sign(x1)
        s = sn * x2.conjugate() / t
        r = sn * t
        return (c, s, r)
    print('Type <:' + repr(type(x1)) + ':> or <:' +
          repr(type(x2)) + ':> not recognized by givensCoef')
    return
def givens(t, i, j, c, s, size):
    """
    G = givens(t, i, j, c, s, size)
    Return an extended givens matrix: an identity matrix of type t and
    size `size` with G[i,i] = G[j,j] = c, G[i,j] = s and
    G[j,i] = -conj(s).  The (c, s) pair is typically obtained from
    givensCoef.
    """
    G = eye(t, size)
    G[i, i] = c
    G[j, j] = c
    G[i, j] = s
    G[j, i] = -s.conjugate()
    return G
def gProd(i, j, c, s, A):
    """
    In-place update A = G A for an (m, n) matrix A, where
    G = givens(A.type, i, j, c, s, m).  Rows i and j of A are replaced by
    their rotated combinations.  A is modified in place; a convenience
    reference to A is returned.
    """
    row_i = A.rowview(i).copy
    row_j = A.rowview(j).copy
    A.rowview(i)[:] = c * row_i + s * row_j
    A.rowview(j)[:] = c * row_j - s.conjugate() * row_i
    return A
def prodG(A, i, j, c, s):
    """
    In-place update A = A GH for an (m, n) matrix A, where
    GH = givens(A.type, i, j, c, s, m).herm.  Columns i and j of A are
    replaced by their rotated combinations.  A is modified in place; a
    convenience reference to A is returned.
    """
    col_i = A.colview(i).copy
    col_j = A.colview(j).copy
    A.colview(i)[:] = c * col_i + s.conjugate() * col_j
    A.colview(j)[:] = c * col_j - s * col_i
    return A
def gtProd(i, j, c, s, A):
    """
    In-place update A = G_TH A for an (m, n) matrix A, where
    G_TH = givens(A.type, i, j, c, s, m).transview.herm.  Rows i and j of
    A are replaced by their rotated combinations.  A is modified in place;
    a convenience reference to A is returned.
    """
    row_i = A.rowview(i).copy
    row_j = A.rowview(j).copy
    A.rowview(i)[:] = c * row_i + s.conjugate() * row_j
    A.rowview(j)[:] = c * row_j - s * row_i
    return A
def prodGT(A, i, j, c, s):
    """
    In-place update A = A G_T for an (m, n) matrix A, where
    G_T = givens(A.type, i, j, c, s, m).transview.  Columns i and j of A
    are replaced by their rotated combinations.  A is modified in place;
    a convenience reference to A is returned.
    """
    col_i = A.colview(i).copy
    col_j = A.colview(j).copy
    A.colview(i)[:] = c * col_i + s * col_j
    A.colview(j)[:] = c * col_j - s.conjugate() * col_i
    return A
# QR decomposition
def QRD_inPlace(A):
    """
    QRD_inPlace(A) computes the QR decomposition of A in place; copy A
    first if you need to keep it.
    On return, R occupies the upper triangle of A and the essential
    householder vectors (normalized so v[0] == 1.0) occupy the strictly
    lower triangle; use QmatExtract / RmatExtract to recover Q and R.
    A must have collength >= rowlength; otherwise a warning is printed
    (work with the transpose in that case).
    """
    m = A.collength
    n = A.rowlength
    if m < n:
        print('The input matrix must have collength >= rowlength.')
        print('For matrices where rowlength > collength work with the transpose.')
    # Process n columns when m > n, n-1 when square: the last column of a
    # square matrix needs no reflection.  (This unifies the previously
    # duplicated "last column" code path.)
    ncols = n if m > n else n - 1
    for i in range(ncols):
        x = A[i:, i:].colview(0)
        v = houseVector(x)
        v /= v[0]
        A[i:, i:] = house(v).prod(A[i:, i:])
        # store the essential part of v in the zeroed sub-diagonal entries
        x[1:] = v[1:]
def fullQProd(Q, B):
    """
    U = fullQProd(Q, B)
    Q: (M, N) matrix with M >= N, previously factored in place by
       QRD_inPlace so its strict lower triangle stores householder vectors.
    B: (M, P) matrix.
    Returns U = Q_full B, where Q_full is the full (M, M) Q of the QR
    decomposition.  B is not modified.
    """
    m = Q.collength
    # Bug fix: the reflector count comes from Q, not B.  The original read
    # B.rowlength, which is only correct when P == N.
    n = Q.rowlength
    U = B.copy
    # Apply reflectors from last to first; a square Q has no reflector in
    # its last column (matching QRD_inPlace).
    start = n - 1 if m > n else n - 2
    for i in range(start, -1, -1):
        v = Q[i:, i:].colview(0).copy
        v[0] = 1
        # Bug fix: H_i acts on ALL columns of rows i: of U.  Restricting to
        # U[i:, i:] (as the original did) is only valid when accumulating
        # against an identity matrix, as in QmatExtract.
        houseProd(v, U[i:, :])
    return U
def QmatExtract(B):
    """
    Q = QmatExtract(B)
    Given a matrix operated on in place by QRD_inPlace (householder
    vectors in its strict lower triangle), return the full
    (collength x collength) Q of the QR decomposition.  B is not modified.
    """
    m = B.collength
    n = B.rowlength
    Q = eye(B.type, m)
    # Apply reflectors from last to first; a square matrix has no
    # reflector in its last column.  (Unifies the previously duplicated
    # "last column" code path.)  Restricting houseProd to Q[i:, i:] is
    # valid here because Q[i:, :i] is zero throughout the accumulation.
    start = n - 1 if m > n else n - 2
    for i in range(start, -1, -1):
        v = B[i:, i:].colview(0).copy
        v[0] = 1
        houseProd(v, Q[i:, i:])
    return Q
def RmatExtract(B):
    """
    R = RmatExtract(B)
    Given a matrix operated on in place by QRD_inPlace, return a new
    matrix holding the (full) R factor: a copy of B with every
    sub-diagonal zeroed.
    """
    R = B.copy
    for k in range(1, B.collength):
        R.diagview(-k).fill(0.0)
    return R
def houseQR(A):
    """
    Q, R = houseQR(A)  (out of place)
    A: (M, N) matrix with M >= N.  Returns unitary Q and upper-triangular
    R with A = Q R.  A warning is printed when M < N (work with the
    transpose in that case).
    """
    R = A.copy
    m = A.collength
    n = A.rowlength
    if m < n:
        print('The input matrix must have collength >= rowlength.')
        print('for matrices where rowlength > collength work with the transpose')
    # n columns when m > n, n-1 when square (the duplicated "last column"
    # code path of the original is unified here).
    ncols = n if m > n else n - 1
    for i in range(ncols):
        x = R[i:, i:].colview(0)
        v = houseVector(x)
        v /= v[0]
        houseProd(v, R[i:, i:])
        x[1:] = v[1:]
    # accumulate Q from the stored householder vectors
    Q = QmatExtract(R)
    # zero the stored vectors so R is strictly upper triangular
    for i in range(1, m):
        R.diagview(-i).fill(0.0)
    return (Q, R)
def bidiag(A):
    """
    B = bidiag(A)  (out of place; requires collength >= rowlength)
    Return the bidiagonal reduction of A: the main diagonal and first
    superdiagonal of B hold the bidiagonal matrix, and the essential
    householder vectors of the left/right reflections are stored in the
    zeroed entries.  Use UmatExtract / VHmatExtract / bidiagExtract to
    recover the factors.
    """
    B = A.copy
    m = B.collength
    n = B.rowlength
    if m < n:
        print('The input matrix must have collength >= rowlength.')
        print('for matrices where rowlength > collength work with the transpose')
    # n columns when m > n, n-1 when square (unifies the previously
    # duplicated "last column" code path).
    ncols = n if m > n else n - 1
    for i in range(ncols):
        # zero column i below the diagonal with a left householder reflection
        x = B[i:, i:].colview(0)
        v = houseVector(x)
        v /= v[0]
        houseProd(v, B[i:, i:])
        x[1:] = v[1:]
        if i < n - 2:
            # zero row i right of the superdiagonal with a right reflection
            j = i + 1
            x = B[i:, j:].rowview(0)
            v = houseVector(x).conj
            v /= v[0]
            prodHouse(B[i:, j:], v)
            x[1:] = v[1:]
    return B
def bidiagExtract(B):
    """
    B0 = bidiagExtract(B)
    Return a new matrix containing only the bidiagonal part (main diagonal
    and first superdiagonal) of B, where B was produced by bidiag(A).
    """
    out = B.empty.fill(0.0)
    out.diagview(0)[:] = B.diagview(0)
    out.diagview(1)[:] = B.diagview(1)
    return out
def UmatExtract(B):
    """
    U = UmatExtract(B)
    Given the result B0 = bidiag(A) (householder vectors stored in the
    zeroed entries), return the left unitary factor U of the bidiagonal
    decomposition A = U B V^H.  B is not modified.
    """
    m = B.collength
    n = B.rowlength
    U = eye(B.type, m)
    # Apply the column reflectors from last to first; a square matrix has
    # no reflector in its last column.  (Unifies the previously duplicated
    # "last column" code path.)
    start = n - 1 if m > n else n - 2
    for i in range(start, -1, -1):
        v = B[i:, i:].colview(0).copy
        v[0] = 1
        houseProd(v, U[i:, i:])
    return U
def VHmatExtract(B):
    """
    VH = VHmatExtract(B)
    Given the result B0 = bidiag(A), reconstruct the hermitian right
    factor V^H of the bidiagonal decomposition A = U B V^H from the row
    householder vectors stored by bidiag.  B is not modified.
    """
    # (The original also read B.collength into an unused local; removed.)
    n = B.rowlength
    V = eye(B.type, n)
    for i in range(n - 3, -1, -1):
        j = i + 1
        v = B[i:, j:].rowview(0).copy
        v[0] = 1
        prodHouse(V[j:, j:], v)
    return V
def givensCoef(x1_in,x2_in):
    """ Code adapted from Algorithm 1 of LAPACK working Notes lawn148

    Returns (c, s, r) so that [c s; -conj(s) c] [x1; x2] = [r; 0].
    NOTE(review): duplicate definition -- givensCoef is already defined
    earlier in this module; this redefinition shadows it.
    """
    if type(x1_in) is int:
        x1=float(x1_in)
    else:
        x1 = x1_in
    if type(x2_in) is int:
        x2=float(x2_in)
    else:
        x2 = x2_in
    if type(x1) is float and type(x2) is float:
        t=pv.vsip_hypot_d(x1,x2)
        if x2 == 0.0:
            return (1.0,0.0,x1)
        elif x1 == 0.0:
            return (0.0,sign(x2),t)
        else: # return (c,s,r)
            sn=sign(x1)
            return(pv.vsip_mag_d(x1)/t,sn*x2/t,sn*t)
    elif type(x1) is complex or type(x2) is complex:
        mx1=pv.vsip_hypot_d(x1.real,x1.imag)
        mx2=pv.vsip_hypot_d(x2.real,x2.imag)
        if mx2 == 0.0:
            return(1.0,0.0,x1)
        elif mx1 == 0.0:
            return(0,sign(x2.conjugate()),mx2)
        else:
            t=pv.vsip_hypot_d(mx1,mx2)
            c=mx1/t
            sn=sign(x1)
            s=(sn * x2.conjugate())/t
            r=sn * t
            return(c,s,r)
    else:
        print('Type <:'+repr(type(x1)) + ':> or <:'+ \
              repr(type(x2))+':> not recognized by givensCoef')
        return
def givensExtract(t, i, j, c, s, size):
    """
    G = givensExtract(t, i, j, c, s, size)
    Build an extended givens rotation matrix: an identity of type t and
    size `size` with G[i,i] = G[j,j] = c, G[i,j] = s and
    G[j,i] = -conj(s).  (c, s) are typically obtained from givensCoef.
    """
    G = eye(t, size)
    G[i, i] = c
    G[j, j] = c
    G[i, j] = s
    G[j, i] = -s.conjugate()
    return G
def gProd(i,j,c,s,A):
    """In-place A = G A for givens parameters (i, j, c, s); returns A.

    NOTE(review): duplicate of the gProd defined earlier in this module;
    this redefinition shadows it.
    """
    a1=A.rowview(i).copy
    a2=A.rowview(j).copy
    A.rowview(i)[:]= c * a1 + s * a2
    A.rowview(j)[:]= c * a2 - s.conjugate() * a1
    return A
def prodG(A,i,j,c,s):
    """In-place A = A GH for givens parameters (i, j, c, s); returns A.

    NOTE(review): duplicate of the prodG defined earlier in this module;
    this redefinition shadows it.
    """
    a_i=A.colview(i).copy
    a_j=A.colview(j).copy
    A.colview(i)[:]= c * a_i + s.conjugate() * a_j
    A.colview(j)[:]= c * a_j - s * a_i
    return A
def gtProd(i,j,c,s,A):
    """In-place A = G_TH A for givens parameters (i, j, c, s); returns A.

    NOTE(review): duplicate of the gtProd defined earlier in this module;
    this redefinition shadows it.
    """
    a_i=A.rowview(i).copy
    a_j=A.rowview(j).copy
    A.rowview(i)[:]= c * a_i + s.conjugate() * a_j
    A.rowview(j)[:]= c * a_j - s * a_i
    return A
def prodGT(A,i,j,c,s):
    """In-place A = A G_T for givens parameters (i, j, c, s); returns A.

    NOTE(review): duplicate of the prodGT defined earlier in this module;
    this redefinition shadows it.
    """
    a1 = A.colview(i).copy
    a2 = A.colview(j).copy
    A.colview(i)[:] = c * a1 + s * a2
    A.colview(j)[:] = c * a2 -s.conjugate() * a1
    return A
def givensQR(A):
    """
    Q, R = givensQR(A)
    QR decomposition of A built from givens rotations (out of place):
    every sub-diagonal entry of each column is rotated to zero while the
    same rotations are accumulated into Q.
    """
    nrows = A.collength
    ncols = A.rowlength
    R = A.copy
    Q = eye(A.type, nrows)
    for i in range(ncols):
        sub = R[i:, i:]
        r = sub[0, 0]
        for j in range(1, sub.collength):
            c, s, r = givensCoef(r, sub[j, 0])
            prodG(Q, i, j + i, c, s)
            gProd(0, j, c, s, sub)
    return (Q, R)
def givensBidiag(A):
    """
    U, B, VH = givensBidiag(A)
    Bidiagonalize A (collength M >= rowlength N) with givens rotations:
    A = U B VH with B upper bidiagonal; the left rotations are accumulated
    into U and the right rotations into VH.  Out of place; A is not
    modified.
    """
    M = A.collength
    N = A.rowlength
    B = A.copy
    U = eye(A.type, M)
    VH = eye(A.type, N)
    # N columns when M > N, N-1 when square (unifies the previously
    # duplicated m>n "last column" code path).
    ncols = N if M > N else N - 1
    for i in range(ncols):
        TC = B[i:, i:]
        # rotate column i to zero below the diagonal
        r = TC[0, 0]
        for j in range(1, TC.collength):
            c, s, r = givensCoef(r, TC[j, 0])
            prodG(U, i, j + i, c, s)
            gProd(0, j, c, s, TC)
        if i < N - 2:
            # rotate row i to zero right of the superdiagonal
            TR = B[i:, i + 1:]
            r = TR[0, 0]
            k = i + 1
            for j in range(1, TR.rowlength):
                c, s, r = givensCoef(r, TR[0, j])
                gtProd(k, j + k, c, s, VH)
                prodGT(TR, 0, j, c, s)
    return (U, B, VH)
def svdZeroCheckAndSet(e, b0, b1):
    """
    svdZeroCheckAndSet(eps, d, f)
    d: vector view of the main diagonal of an upper bidiagonal matrix
    f: vector view of its first superdiagonal
    Any superdiagonal element whose magnitude falls below
    eps * (|d[i]| + |d[i+1]|) is considered negligible and set to zero.
    """
    thresh = e * (b0[0:b1.length].mag + b0[1:].mag)
    small = b1.mag.llt(thresh)
    if small.anytrue:  # flush negligible superdiagonal entries
        b1.indxFill(small.indexbool, 0.0)
def svdCorners(b1):
    """
    i, j = svdCorners(v)
    v is the real first superdiagonal of a bidiagonal matrix.
    On return v[i:j-1] contains no zero elements and v[j-1:] is all
    zeros; the corresponding main-diagonal block is d[i:j].
    Returns (0, 0) when v is entirely zero.
    """
    flags = b1.leq(0.0)
    j = flags.length - 1
    # scan trailing zeros from the end
    while j >= 0 and flags[j] == 1:
        j -= 1
    if j == -1:
        return (0, 0)  # every element of b1 is zero
    i = j       # index of last non-zero
    j += 1      # index of first trailing zero
    # extend the non-zero run toward the front
    while i >= 0 and flags[i] == 0:
        i -= 1
    return (i + 1, j + 1)
def diagPhaseToZero(L, B):
    """
    Phase-shift each complex main-diagonal entry of B to its magnitude
    (so the diagonal becomes real), applying the unit phase to column i
    of the left update matrix L and to row i of B.  Done in place.
    """
    d = B.diagview(0)
    for i in range(d.length):
        ps = d[i]  # diagonal entry; its unit phase is removed below
        if ps.imag != 0.0:  # entries that are already real are skipped
            m = math.hypot(ps.real, ps.imag)  # was pv.vsip_hypot_d
            ps /= m
            L.colview(i)[:] *= ps
            B.rowview(i)[:] *= ps  # if B is strictly diagonal don't need this step
            d[i] = m
def biDiagPhaseToZero(L, d, f, R, eps0):
    """
    Make a complex bidiagonal matrix real, in place.
    d = B.diagview(0) and f = B.diagview(1) of a bidiagonal matrix B are
    phase-shifted so both become real (and non-negative); the
    compensating phases are folded into the columns of the left update
    matrix L and the rows of the right update matrix R.
    Main-diagonal elements with magnitude <= eps0 are set to 0.0, and
    svdZeroCheckAndSet is applied to flush small superdiagonal elements.
    """
    def _phase_mag(z):
        # Return (unit_phase, magnitude) of scalar z; a real z gives
        # (+/-1.0, |z|).  This factors out the pattern the original
        # repeated three times.  math.hypot replaces pv.vsip_hypot_d.
        if z.imag == 0.0:
            m = z.real
            return (-1.0 if m < 0.0 else 1.0), abs(m)
        m = math.hypot(z.real, z.imag)
        return z / m, m
    for i in range(d.length):
        ps, m = _phase_mag(d[i])
        if m > eps0:
            L.colview(i)[:] *= ps
            d[i] = m
            if i < f.length:
                f[i] *= ps.conjugate()
        else:
            d[i] = 0.0  # treat tiny diagonal entries as exact zeros
    svdZeroCheckAndSet(eps0, d, f)
    for i in range(f.length - 1):
        j = i + 1
        ps, m = _phase_mag(f[i])
        L.colview(j)[:] *= ps.conjugate()
        R.rowview(j)[:] *= ps
        f[i] = m
        f[j] *= ps
    # last superdiagonal element
    j = f.length
    i = j - 1
    ps, m = _phase_mag(f[i])
    f[i] = m
    L.colview(j)[:] *= ps.conjugate()
    R.rowview(j)[:] *= ps
def zeroRow(L,d,f):
    """
    To use this we assume a matrix B that is bi-diagonalized.
    Note i,j = svdCorners(B) => i, j=n+1
    Let d0 be B.diagview(0); f0 be B.diagview(1)
    d is a subview of the main diagonal
    f is a subview of the first superdiagonal (diagonal(1)) and has no zeros.
    if f = f0[i:n] then d = d0[i:n+1]
    L is a subview of the left update matrix we call L0 here.
    for the indices shown above
    L = L0[:,i:n+1]
    If d contains a zero entry, and the zero entry is not at the end of d,
    then zeroRow is used to zero out the corresponding superdiagonal entry
    in the row. Vector d may contain more than one zero. We zero out the zero
    with the largest index (we designate k). So d[k] = d0[i+k] is the zero
    of interest.
    Note if d[k] is the last entry then the corresponding superdiagonal entry
    in the row is already zero. Use zeroCol to zero out the column.
    Usage:
        zeroRow(L[:,k:],d[k+1:],f[k:])
    """
    if 'cvview' in d.type or 'cvview' in f.type:
        print('zeroRow only works for real vectors')
        return
    if d.length == 1:
        # single-element case: one rotation clears the row
        c,s,r=givensCoef(d[0],f[0])
        f[0]=0.0;d[0]=r
    else:
        # rotate f[0] into d[0]; the displaced value t is then chased
        # along the row by the rotations below
        c,s,r=givensCoef(d[0],f[0])
        f[0]=0;d[0]=r
        t= - f[1] * s; f[1] *= c
        prodG(L,1,0,c,s)
        for i in range(1,d.length-1):
            c,s,r=givensCoef(d[i],t)
            prodG(L,i+1,0,c,s)
            d[i]=r; t=-f[i+1] * s; f[i+1] *= c
        # final rotation absorbs the last displaced value
        c,s,r=givensCoef(d[d.length-1],t)
        d[d.length-1] = r
        prodG(L,d.length,0,c,s)
def zeroCol(d,f,R):
    """
    To use this we assume a matrix B that is bi-diagonalized.
    Note i,j = svdCorners(B) => i, j=n+1
    Let d0 be B.diagview(0); f0 be B.diagview(1)
    d is a subview of the main diagonal
    f is a subview of the first superdiagonal (diagonal(1)) and has no zeros.
    if f = f0[i:n] then d = d0[i:n+1]
    R is a subview of the right update matrix we call R0 here.
    for the indices shown above
    R = R0[i:n+1,:]
    We assume matrix B has all zeros on row n.
    Usage:
        zeroCol(d,f,R)
    """
    if 'cvview' in d.type or 'cvview' in f.type:
        print('zeroCol only works for real vectors')
        return
    if f.length == 1:
        # single superdiagonal element: one rotation clears the column
        c,s,r=givensCoef(d[0],f[0])
        d[0]=r; f[0]=0.0
        gtProd(0,1,c,s,R)
    elif f.length == 2:
        # two-element case, written out explicitly
        c,s,r=givensCoef(d[1],f[1])
        d[1]=r; f[1]=0;
        t= - f[0] * s; f[0] *= c
        gtProd(1,2,c,s,R)
        c,s,r=givensCoef(d[0],t)
        d[0]=r;
        gtProd(0,2,c,s,R)
    else:
        # general case: rotate f[-1] into d[-1], then chase the displaced
        # value t up the column toward index 0
        i=f.length-1; j=i-1; k=i
        c,s,r=givensCoef(d[i],f[i])
        f[i]=0; d[i]=r; t=-f[j]*s; f[j]*=c;
        gtProd(i,k+1,c,s,R)
        while i > 1:
            i = j; j = i-1
            c,s,r=givensCoef(d[i],t)
            d[i]=r; t= - f[j] * s; f[j] *= c
            gtProd(i,k+1,c,s,R)
        # final rotation absorbs the last displaced value
        c,s,r=givensCoef(d[0],t)
        d[0] = r
        gtProd(0,k+1,c,s,R)
def svdMu(d2, f1, d3, f2):
    """
    Compute the shift used by svdStep from real scalars d2, f1, d3, f2
    (entries of the trailing portion of the bidiagonal matrix; complex
    phase has already been removed, so real arithmetic suffices).
    Returns the eigenvalue estimate (lambda1 or lambda2 of the trailing
    2x2 system) closest to cl.
    """
    def _scaled_sq_sum(a2, b2):
        # Return a2 + b2 (both squares) using the factored form of the
        # original LAPACK-style code to limit overflow.  Factors out a
        # pattern the original repeated twice.
        if a2 == 0.0:
            return b2
        if a2 < b2:
            return b2 * (1. + a2 / b2)
        return a2 * (1. + b2 / a2)
    cu = _scaled_sq_sum(d2 * d2, f1 * f1)
    cl = _scaled_sq_sum(d3 * d3, f2 * f2)
    cd = d2 * f2
    T = cu + cl
    D = (cu * cl - cd * cd) / (T * T)
    # Guard against a (numerically) negative value under the square root;
    # math.sqrt replaces pv.vsip_sqrt_d.
    root = 0.0 if 4. * D > 1.0 else T * math.sqrt(1.0 - 4. * D)
    lambda1 = (T + root) / 2.
    lambda2 = (T - root) / 2.
    return lambda1 if abs(lambda1 - cl) < abs(lambda2 - cl) else lambda2
def svdStep(L,d,f,R):
    """
    One implicit-shift SVD iteration step on the real bidiagonal matrix
    with main diagonal d and superdiagonal f; the left givens rotations
    are accumulated into L and the right ones into R.  All updates are
    in place.
    NOTE(review): the initial step reads d[1], so callers must ensure
    d.length >= 2 -- confirm against the svd driver.
    """
    if 'cvview' in d.type or 'cvview' in f.type:
        print('Input vector views must be of type real; Fail for svdStep')
        return
    n=d.length
    #initial step: compute the shift mu from the trailing entries (svdMu)
    if n >= 3:
        mu = svdMu(d[n-2],f[n-3],d[n-1],f[n-2])
    elif n == 2:
        mu = svdMu(d[0],0.0,d[1],f[0])
    else:
        mu = svdMu(d[0],0.0,0.0,0.0)
    # first rotation comes from the shifted leading entries
    x1=d[0]; x1 *= x1; x1 -= mu
    x2 = d[0] * f[0]
    c,s,r=givensCoef(x1,x2)
    t=d[0] * c + s * f[0]; f[0] *= c; f[0] -= s * d[0]; d[0] = t;
    t=s * d[1]; d[1] *= c;
    gtProd(0,1,c,s,R)
    # propagate the displaced value t down the matrix, alternating
    # left and right rotations
    for i in range(n-2):
        j=i+1; k=i+2
        #left rotation step
        c,s,r = givensCoef(d[i],t)
        d[i]=r;
        t=c * d[j] - s * f[i]; f[i] *=c ;f[i]+=s*d[j];d[j]=t
        t=s * f[j]; f[j] *= c;
        prodG(L,i,j,c,s)
        #right rotation step
        c,s,r=givensCoef(f[i],t)
        f[i]=r
        t=c * d[j] + s * f[j]; f[j] *= c; f[j] -= s * d[j]; d[j] = t
        t=s * d[k]; d[k] *= c;
        gtProd(j,k,c,s,R)
    #final step: absorb the last displaced value
    i=n-2; j=n-1
    c,s,r = givensCoef(d[i],t)
    d[i]=r;
    t= c * d[j] - s * f[i];
    f[i] *= c; f[i] += s * d[j];
    d[j]=t
    prodG(L,i,j,c,s)
def zeroFind(d,eps0):
j = d.length
xd=d[j-1]
while(xd > eps0):
if (j > 1):
j -= 1;
xd=d[j-1]
elif(j==1):
return 0;
d[j-1]=0.0
return j
def svd(A):
"""
The bidiag routine is used in the svd and bidiag is defined out of place,
so svd is also out of place. The bidiag routine can be done in-place with
a simple change, so the svd can also be done in-place.
Usage:
U,S,VH = svd(A)
A is a matrix with column length >= row length
where U is a unitary matrix of size A.columnlength
S is a real vector of size A.rowlength containing the singular values of A
Note: S is considered here to be a diagonal matrix
VH is a unitary matrix of size A.rowlength
Note:
A = U S VH = U.prod(S.mmul(VH.ROW))
"""
def svdBidiagonal(A):
if 'mview_f' not in A.type and 'mview_d' not in A.type:
print('Input must be a matrix of type float for function svd.')
return
if A.rowlength > A.collength:
print('For svd function input matrix A of size (M,N) must have N >= M')
return(0,0,0,0,0)
if 'mview_d' in A.type:
eps0 = A.normFro/A.rowlength * 1.0E16
else:
eps0 = A.normFro/A.rowlength * 1.0E8
if eps0 == 0.0:
print('Input matrix appears to be zero')
return(0,0,0,0,0)
else:
eps0 = 1.0/eps0
B=bidiag(A)
L=UmatExtract(B)
R=VHmatExtract(B)
biDiagPhaseToZero(L,B.diagview(0),B.diagview(1),R,eps0)
if 'cmview' in B.type:
d0=B.diagview(0).realview.copy
f0=B.diagview(1).realview.copy
else:
d0=B.diagview(0).copy
f0=B.diagview(1).copy
return (L,d0,f0,R,eps0)
def svdIteration(L0,d0,f0,R0,eps0):
cntr=0
maxcntr=5*d0.length
while cntr < maxcntr:
print('%d %d\n'%(d0.length,f0.length))
biDiagPhaseToZero(L0,d0,f0,R0,eps0)
cntr += 1
i,j=svdCorners(f0)
if j == 0:
break
d=d0[i:j]
f=f0[i:j-1]
L=L0[:,i:j]
R=R0[i:j,:]
n=f.length
k=zeroFind(d,eps0)
if k >0:
k -= 1;
if d[n] == 0.0:
zeroCol(d,f,R)
else:
zeroRow(L[:,k:],d[k+1:],f[k:])
else:
svdStep(L,d,f,R)
def svdSort(L,d,R):
indx=d.sort('BYVALUE','DESCENDING')
if 'cmview' in R.type:
R.realview.permute(indx,'ROW')
R.imagview.permute(indx,'ROW')
L[:,0:d.length].realview.permute(indx,'COL')
L[:,0:d.length].imagview.permute(indx,'COL')
else:
R.permute(indx,'ROW')
L[:,0:d.length].permute(indx,'COL')
U,S,f0,VH,eps0 = svdBidiagonal(A)
svdIteration(U,S,f0,VH,eps0)
svdSort(U,S,VH)
return(U,S,VH)
| [
"pyJvsip.vsip_sqrt_d",
"pyJvsip.vsip_mag_d",
"pyJvsip.create",
"pyJvsip.vsip_hypot_d"
] | [((420, 438), 'pyJvsip.create', 'pv.create', (['t', 'n', 'n'], {}), '(t, n, n)\n', (429, 438), True, 'import pyJvsip as pv\n'), ((740, 771), 'pyJvsip.vsip_hypot_d', 'pv.vsip_hypot_d', (['a.real', 'a.imag'], {}), '(a.real, a.imag)\n', (755, 771), True, 'import pyJvsip as pv\n'), ((3057, 3080), 'pyJvsip.vsip_hypot_d', 'pv.vsip_hypot_d', (['x1', 'x2'], {}), '(x1, x2)\n', (3072, 3080), True, 'import pyJvsip as pv\n'), ((12384, 12407), 'pyJvsip.vsip_hypot_d', 'pv.vsip_hypot_d', (['x1', 'x2'], {}), '(x1, x2)\n', (12399, 12407), True, 'import pyJvsip as pv\n'), ((18842, 18875), 'pyJvsip.vsip_hypot_d', 'pv.vsip_hypot_d', (['ps.real', 'ps.imag'], {}), '(ps.real, ps.imag)\n', (18857, 18875), True, 'import pyJvsip as pv\n'), ((3367, 3400), 'pyJvsip.vsip_hypot_d', 'pv.vsip_hypot_d', (['x1.real', 'x1.imag'], {}), '(x1.real, x1.imag)\n', (3382, 3400), True, 'import pyJvsip as pv\n'), ((3412, 3445), 'pyJvsip.vsip_hypot_d', 'pv.vsip_hypot_d', (['x2.real', 'x2.imag'], {}), '(x2.real, x2.imag)\n', (3427, 3445), True, 'import pyJvsip as pv\n'), ((12694, 12727), 'pyJvsip.vsip_hypot_d', 'pv.vsip_hypot_d', (['x1.real', 'x1.imag'], {}), '(x1.real, x1.imag)\n', (12709, 12727), True, 'import pyJvsip as pv\n'), ((12739, 12772), 'pyJvsip.vsip_hypot_d', 'pv.vsip_hypot_d', (['x2.real', 'x2.imag'], {}), '(x2.real, x2.imag)\n', (12754, 12772), True, 'import pyJvsip as pv\n'), ((17107, 17140), 'pyJvsip.vsip_hypot_d', 'pv.vsip_hypot_d', (['ps.real', 'ps.imag'], {}), '(ps.real, ps.imag)\n', (17122, 17140), True, 'import pyJvsip as pv\n'), ((17952, 17985), 'pyJvsip.vsip_hypot_d', 'pv.vsip_hypot_d', (['ps.real', 'ps.imag'], {}), '(ps.real, ps.imag)\n', (17967, 17985), True, 'import pyJvsip as pv\n'), ((18486, 18519), 'pyJvsip.vsip_hypot_d', 'pv.vsip_hypot_d', (['ps.real', 'ps.imag'], {}), '(ps.real, ps.imag)\n', (18501, 18519), True, 'import pyJvsip as pv\n'), ((22459, 22488), 'pyJvsip.vsip_sqrt_d', 'pv.vsip_sqrt_d', (['(1.0 - 4.0 * D)'], {}), '(1.0 - 4.0 * D)\n', (22473, 22488), True, 'import 
pyJvsip as pv\n'), ((3599, 3624), 'pyJvsip.vsip_hypot_d', 'pv.vsip_hypot_d', (['mx1', 'mx2'], {}), '(mx1, mx2)\n', (3614, 3624), True, 'import pyJvsip as pv\n'), ((12926, 12951), 'pyJvsip.vsip_hypot_d', 'pv.vsip_hypot_d', (['mx1', 'mx2'], {}), '(mx1, mx2)\n', (12941, 12951), True, 'import pyJvsip as pv\n'), ((3268, 3285), 'pyJvsip.vsip_mag_d', 'pv.vsip_mag_d', (['x1'], {}), '(x1)\n', (3281, 3285), True, 'import pyJvsip as pv\n'), ((12595, 12612), 'pyJvsip.vsip_mag_d', 'pv.vsip_mag_d', (['x1'], {}), '(x1)\n', (12608, 12612), True, 'import pyJvsip as pv\n')] |
# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest import config
from tripleo_common_tempest_plugin.services import base
CONF = config.CONF
class MistralClientV2(base.MistralClientBase):
def post_request(self, url_path, file_contents):
headers = {"headers": "Content-Type:text/plain"}
return self.post(
url_path,
file_contents,
headers=headers
)
def get_request(self, url_path):
headers = {"headers": "Content-Type:application/json"}
return self.get(url_path, headers=headers)
def post_json(self, url_path, obj, extra_headers={}):
headers = {"Content-Type": "application/json"}
headers = dict(headers, **extra_headers)
return self.post(url_path, json.dumps(obj), headers=headers)
def update_request(self, url_path, file_name):
headers = {"headers": "Content-Type:text/plain"}
resp, body = self.put(
url_path,
base.get_resource(file_name),
headers=headers
)
return resp, json.loads(body)
def create_workbook(self, yaml_file):
resp, body = self.post_request('workbooks', yaml_file)
wb_name = json.loads(body)['name']
self.workbooks.append(wb_name)
_, wfs = self.get_list_obj('workflows')
for wf in wfs['workflows']:
if wf['name'].startswith(wb_name):
self.workflows.append(wf['name'])
return resp, json.loads(body)
def create_execution(self, workflow_name, input_=None):
body = {"workflow_name": workflow_name}
if input_:
body.update({'input': json.dumps(input_)})
resp, body = self.post('executions', json.dumps(body))
self.executions.append(json.loads(body)['id'])
return resp, json.loads(body)
def get_execution(self, execution_id):
return self.get('executions/%s' % execution_id)
def get_executions(self, task_id):
url_path = 'executions'
if task_id:
url_path += '?task_execution_id=%s' % task_id
return self.get_list_obj(url_path)
| [
"json.loads",
"json.dumps",
"tripleo_common_tempest_plugin.services.base.get_resource"
] | [((1340, 1355), 'json.dumps', 'json.dumps', (['obj'], {}), '(obj)\n', (1350, 1355), False, 'import json\n'), ((1549, 1577), 'tripleo_common_tempest_plugin.services.base.get_resource', 'base.get_resource', (['file_name'], {}), '(file_name)\n', (1566, 1577), False, 'from tripleo_common_tempest_plugin.services import base\n'), ((1639, 1655), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (1649, 1655), False, 'import json\n'), ((1781, 1797), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (1791, 1797), False, 'import json\n'), ((2050, 2066), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (2060, 2066), False, 'import json\n'), ((2297, 2313), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (2307, 2313), False, 'import json\n'), ((2393, 2409), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (2403, 2409), False, 'import json\n'), ((2347, 2363), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (2357, 2363), False, 'import json\n'), ((2230, 2248), 'json.dumps', 'json.dumps', (['input_'], {}), '(input_)\n', (2240, 2248), False, 'import json\n')] |
"""
This module defines functions that are called on a django signal such as post_migrate.
"""
from serverside import utils
def create_permissions_and_grant_privileges(*args, **kwargs):
"""
Creates database permissions to assign to a user.
Creates django permissions that reflect what a corresponding database user is
allowed to do when directly logged into the database. These permissions are
translated into database privileges and granted to a user when a user is saved.
Args:
args: Postional arguments for compatibility. Not used.
kwargs: Keyworded arguments for compatibility. Not used.
"""
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from serverside.models import User
# Workaround for a decade-old bug in django:
# See here: https://code.djangoproject.com/ticket/10827#no1 and
# here: https://github.com/pytest-dev/pytest-django/issues/18
ContentType.objects.clear_cache()
models = utils.get_all_models(True, False)
for m in models:
codename = utils.get_permission_codename("select", m)
name = f"Can SELECT from {m._meta.db_table} table" # nosec
content_type = ContentType.objects.get_for_model(m)
Permission.objects.update_or_create(
codename=codename, defaults={"name": name, "content_type": content_type}
)
# Grant privileges that existing users already have.
users = User.objects.all()
for user in users:
user.update_db_permissions()
| [
"django.contrib.contenttypes.models.ContentType.objects.get_for_model",
"django.contrib.auth.models.Permission.objects.update_or_create",
"serverside.utils.get_permission_codename",
"django.contrib.contenttypes.models.ContentType.objects.clear_cache",
"serverside.utils.get_all_models",
"serverside.models.... | [((990, 1023), 'django.contrib.contenttypes.models.ContentType.objects.clear_cache', 'ContentType.objects.clear_cache', ([], {}), '()\n', (1021, 1023), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((1038, 1071), 'serverside.utils.get_all_models', 'utils.get_all_models', (['(True)', '(False)'], {}), '(True, False)\n', (1058, 1071), False, 'from serverside import utils\n'), ((1494, 1512), 'serverside.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (1510, 1512), False, 'from serverside.models import User\n'), ((1112, 1154), 'serverside.utils.get_permission_codename', 'utils.get_permission_codename', (['"""select"""', 'm'], {}), "('select', m)\n", (1141, 1154), False, 'from serverside import utils\n'), ((1246, 1282), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['m'], {}), '(m)\n', (1279, 1282), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((1292, 1405), 'django.contrib.auth.models.Permission.objects.update_or_create', 'Permission.objects.update_or_create', ([], {'codename': 'codename', 'defaults': "{'name': name, 'content_type': content_type}"}), "(codename=codename, defaults={'name':\n name, 'content_type': content_type})\n", (1327, 1405), False, 'from django.contrib.auth.models import Permission\n')] |
""""""
from scipy import misc
import time
img = 'IMG-20200828-WA0022.'
output0 = img+'..png'
output1 = img+'...png'
img+='jpg'
input0 = misc.imread(img)
input1 = misc.imread(img)
l = input0.shape[0]
c = input1.shape[1]
print('\n %d x %d [%d] \n' %(l, c, (l*c)))
start = time.time()
for x in range(l):
#print(x,end='\t')
for y in range(c):
zero = list(input0[x,y])
um = list(input1[x,y])
if x%2 == y%2:
#verde no 0
zero[0] = zero[2] = (zero[0] + zero[2])//2
# zero[0] = zero[2] = (int(zero[0]) + int(zero[2]))//2
#cinza no 1
um[0]=um[1]=um[2] = (um[0]+um[1]+um[1]+um[2])//4
# um[0]=um[1]=um[2] = (int(um[0])+int(um[1])+int(um[1])+int(um[2]))//4
elif y%2 == 0:
#vermelho no 0
zero[1] = zero[2] = (zero[1] + zero[2])//2
# zero[1] = zero[2] = (int(zero[1]) + int(zero[2]))//2
#verde no 1
um[0] = um[2] = (um[0] + um[2])//2
# um[0] = um[2] = (int(um[0]) + int(um[2]))//2
else:
#azul no 0
zero[1] = zero[0] = (zero[1] + zero[0])//2
# zero[1] = zero[0] = (int(zero[1]) + int(zero[0]))//2
if x%2 == 0:
#vermelho no 1
um[1] = um[2] = (um[1] + um[2])//2
# um[1] = um[2] = (int(um[1]) + int(um[2]))//2
else:
#azul no 1
um[1] = um[0] = (um[1] + um[0])//2
# um[1] = um[0] = (int(um[1]) + int(um[0]))//2
input0[x,y] = zero
input1[x,y] = um
t = time.time() - start
print('%f s \a' %t)
print('%d px \t\t %f px/s \t %f s/px' %(l*c, (l*c)/t, t/(l*c)))
print('%d ln \t\t %f ln/s \t %f s/ln' %(l, l/t, t/l))
print('%d col \t\t %f col/s \t %f s/col' %(c, c/t, t/c))
misc.imsave(output1, input1)
misc.imsave(output0, input0)
#time.sleep(11)
input0 = misc.imread(img)
input1 = misc.imread(img)
start = time.time()
for x in range(l):
#print(x,end='\t')
for y in range(c):
zero = list(input0[x,y])
um = list(input1[x,y])
if x%2 == y%2:
#verde no 0
zero[0] = zero[2] = (int(zero[0]) + int(zero[2]))//2
#cinza no 1
um[0]=um[1]=um[2] = (int(um[0])+int(um[1])+int(um[1])+int(um[2]))//4
elif y%2 == 0:
#vermelho no 0
zero[1] = zero[2] = (int(zero[1]) + int(zero[2]))//2
#verde no 1
um[0] = um[2] = (int(um[0]) + int(um[2]))//2
else:
#azul no 0
zero[1] = zero[0] = (int(zero[1]) + int(zero[0]))//2
if x%2 == 0:
#vermelho no 1
um[1] = um[2] = (int(um[1]) + int(um[2]))//2
else:
#azul no 1
um[1] = um[0] = (int(um[1]) + int(um[0]))//2
input0[x,y] = zero
input1[x,y] = um
t = time.time() - start
print('%f s \a' %t)
print('%d px \t\t %f px/s \t %f s/px' %(l*c, (l*c)/t, t/(l*c)))
print('%d ln \t\t %f ln/s \t %f s/ln' %(l, l/t, t/l))
print('%d col \t\t %f col/s \t %f s/col' %(c, c/t, t/c))
misc.imsave(output1+'.png', input1)
misc.imsave(output0+'.png', input0) | [
"scipy.misc.imsave",
"scipy.misc.imread",
"time.time"
] | [((138, 154), 'scipy.misc.imread', 'misc.imread', (['img'], {}), '(img)\n', (149, 154), False, 'from scipy import misc\n'), ((164, 180), 'scipy.misc.imread', 'misc.imread', (['img'], {}), '(img)\n', (175, 180), False, 'from scipy import misc\n'), ((275, 286), 'time.time', 'time.time', ([], {}), '()\n', (284, 286), False, 'import time\n'), ((1535, 1563), 'scipy.misc.imsave', 'misc.imsave', (['output1', 'input1'], {}), '(output1, input1)\n', (1546, 1563), False, 'from scipy import misc\n'), ((1564, 1592), 'scipy.misc.imsave', 'misc.imsave', (['output0', 'input0'], {}), '(output0, input0)\n', (1575, 1592), False, 'from scipy import misc\n'), ((1619, 1635), 'scipy.misc.imread', 'misc.imread', (['img'], {}), '(img)\n', (1630, 1635), False, 'from scipy import misc\n'), ((1645, 1661), 'scipy.misc.imread', 'misc.imread', (['img'], {}), '(img)\n', (1656, 1661), False, 'from scipy import misc\n'), ((1671, 1682), 'time.time', 'time.time', ([], {}), '()\n', (1680, 1682), False, 'import time\n'), ((2619, 2656), 'scipy.misc.imsave', 'misc.imsave', (["(output1 + '.png')", 'input1'], {}), "(output1 + '.png', input1)\n", (2630, 2656), False, 'from scipy import misc\n'), ((2655, 2692), 'scipy.misc.imsave', 'misc.imsave', (["(output0 + '.png')", 'input0'], {}), "(output0 + '.png', input0)\n", (2666, 2692), False, 'from scipy import misc\n'), ((1320, 1331), 'time.time', 'time.time', ([], {}), '()\n', (1329, 1331), False, 'import time\n'), ((2403, 2414), 'time.time', 'time.time', ([], {}), '()\n', (2412, 2414), False, 'import time\n')] |
'''
Bandidos estocásticos: introducción, algoritmos y experimentos
TFG Informática
Sección 4.2
Figura 1
Autor: <NAME>
'''
import random
import sympy.stats as stats
import matplotlib.pyplot as plt
def randomPolicy(n,machines):
chosen = n*[-1]
totalrwd = 0
for i in range(n):
chosen[i] = random.randint(0,len(machines)-1)
totalrwd += stats.sample(machines[chosen[i]])
return chosen,totalrwd
def exploremtimes(n,machines,m):
chosen = n*[-1]
rwdbyArm = len(machines)*[0]
timesArm = len(machines)*[0]
totalrwd = 0
ind = 0
for i in range(m):
for j in range(len(machines)):
chosen[ind] = j
rwd = stats.sample(machines[chosen[ind]])
totalrwd += rwd
rwdbyArm[j] += rwd
timesArm[j] += 1
ind += 1
best = 0
bestVal = rwdbyArm[0]/timesArm[0]
for i in range(1,len(machines)):
val = rwdbyArm[i]/timesArm[i]
if val > bestVal:
bestVal = val
best = i
while ind < n:
chosen[ind] = best
rwd = stats.sample(machines[chosen[ind]])
totalrwd += rwd
ind += 1
return chosen,totalrwd
def computeCumRegret(n,testnum,probs,chosen):
regret = n*[0]
acum = testnum*[0]
maxprob = max(probs)
for j in range(n):
for i in range(testnum):
acum[i] += (maxprob - probs[chosen[i][j]])
regret[j] += acum[i]
regret[j] /= testnum
return regret
nmachines = 2
probs = [0.2,0.7]
machines = nmachines*[None]
for i in range(nmachines):
pmf = {0: 1 - probs[i], 1: probs[i]}
machines[i] = stats.FiniteRV('B('+str(i)+')',pmf)
n = 50
testnum = 100
chosen = testnum*[None]
totalrwd = testnum*[0]
for i in range(testnum):
chosen[i], totalrwd[i] = randomPolicy(n,machines)
regret = computeCumRegret(n,testnum,probs,chosen)
plt.plot(regret,color='tab:blue')
for i in range(testnum):
chosen[i], totalrwd[i] = exploremtimes(n,machines,1)
regret = computeCumRegret(n,testnum,probs,chosen)
plt.plot(regret,color='orange')
for i in range(testnum):
chosen[i], totalrwd[i] = exploremtimes(n,machines,4)
regret = computeCumRegret(n,testnum,probs,chosen)
plt.plot(regret,color='green')
for i in range(testnum):
chosen[i], totalrwd[i] = exploremtimes(n,machines,8)
regret = computeCumRegret(n,testnum,probs,chosen)
plt.plot(regret,color='red')
for i in range(testnum):
chosen[i], totalrwd[i] = exploremtimes(n,machines,12)
regret = computeCumRegret(n,testnum,probs,chosen)
plt.plot(regret,color='tab:purple')
for i in range(testnum):
chosen[i], totalrwd[i] = exploremtimes(n,machines,16)
regret = computeCumRegret(n,testnum,probs,chosen)
plt.plot(regret,color='brown')
#plt.legend(['4 exploraciones','8 exploraciones','12 exploraciones','16 exploraciones'])
plt.legend(['aleatorio','1 exploración','4 exploraciones','8 exploraciones','12 exploraciones','16 exploraciones'])
plt.xlabel("Ronda")
plt.ylabel("Remordimiento acumulado")
fig = plt.gcf()
fig.savefig("Bernoulli.pdf",format='pdf')
plt.show()
| [
"sympy.stats.sample",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1943, 1977), 'matplotlib.pyplot.plot', 'plt.plot', (['regret'], {'color': '"""tab:blue"""'}), "(regret, color='tab:blue')\n", (1951, 1977), True, 'import matplotlib.pyplot as plt\n'), ((2111, 2143), 'matplotlib.pyplot.plot', 'plt.plot', (['regret'], {'color': '"""orange"""'}), "(regret, color='orange')\n", (2119, 2143), True, 'import matplotlib.pyplot as plt\n'), ((2279, 2310), 'matplotlib.pyplot.plot', 'plt.plot', (['regret'], {'color': '"""green"""'}), "(regret, color='green')\n", (2287, 2310), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2473), 'matplotlib.pyplot.plot', 'plt.plot', (['regret'], {'color': '"""red"""'}), "(regret, color='red')\n", (2452, 2473), True, 'import matplotlib.pyplot as plt\n'), ((2608, 2644), 'matplotlib.pyplot.plot', 'plt.plot', (['regret'], {'color': '"""tab:purple"""'}), "(regret, color='tab:purple')\n", (2616, 2644), True, 'import matplotlib.pyplot as plt\n'), ((2780, 2811), 'matplotlib.pyplot.plot', 'plt.plot', (['regret'], {'color': '"""brown"""'}), "(regret, color='brown')\n", (2788, 2811), True, 'import matplotlib.pyplot as plt\n'), ((2902, 3026), 'matplotlib.pyplot.legend', 'plt.legend', (["['aleatorio', '1 exploración', '4 exploraciones', '8 exploraciones',\n '12 exploraciones', '16 exploraciones']"], {}), "(['aleatorio', '1 exploración', '4 exploraciones',\n '8 exploraciones', '12 exploraciones', '16 exploraciones'])\n", (2912, 3026), True, 'import matplotlib.pyplot as plt\n'), ((3018, 3037), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ronda"""'], {}), "('Ronda')\n", (3028, 3037), True, 'import matplotlib.pyplot as plt\n'), ((3038, 3075), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Remordimiento acumulado"""'], {}), "('Remordimiento acumulado')\n", (3048, 3075), True, 'import matplotlib.pyplot as plt\n'), ((3082, 3091), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3089, 3091), True, 'import matplotlib.pyplot as plt\n'), ((3134, 3144), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(3142, 3144), True, 'import matplotlib.pyplot as plt\n'), ((369, 402), 'sympy.stats.sample', 'stats.sample', (['machines[chosen[i]]'], {}), '(machines[chosen[i]])\n', (381, 402), True, 'import sympy.stats as stats\n'), ((1114, 1149), 'sympy.stats.sample', 'stats.sample', (['machines[chosen[ind]]'], {}), '(machines[chosen[ind]])\n', (1126, 1149), True, 'import sympy.stats as stats\n'), ((697, 732), 'sympy.stats.sample', 'stats.sample', (['machines[chosen[ind]]'], {}), '(machines[chosen[ind]])\n', (709, 732), True, 'import sympy.stats as stats\n')] |
import re
from datetime import date
import tagging
from ckeditor.fields import RichTextField
from django.core import validators
from django.core.urlresolvers import reverse
from django.db import models
from django.template.defaultfilters import truncatechars
from django.utils import timezone
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from common.constants import SUMMARY_MAX_LENGTH, STATUS_PENDING, STATUS_PUBLISHED
from common.functions import instance_get_thumbnail
from common.models import PriorityModel, AbstractPermalink
import files_widget
from organization.models import Organization
from party.models import Party
from tagging_autocomplete_tagit.models import TagAutocompleteTagItField
from taxonomy.models import ArticleCategory
USER_MODEL = settings.AUTH_USER_MODEL
class CommonCms(AbstractPermalink):
title = models.CharField(max_length=255)
image = files_widget.ImageField(null=True, blank=True)
summary = models.TextField(null=True, blank=True)
description = RichTextField(null=True, blank=True)
topics = models.ManyToManyField('taxonomy.Topic', null=True, blank=True, related_name='cms_topics')
party_created_by = models.ForeignKey(Party, related_name='cms_party_created_by')
created_by = models.ForeignKey(USER_MODEL, related_name='cms_created_by')
published_by = models.ForeignKey(USER_MODEL, related_name='cms_published_by', null=True, blank=True)
status = models.IntegerField(default=STATUS_PUBLISHED)
created = models.DateTimeField(auto_now_add=True, null=True, blank=True)
changed = models.DateTimeField(auto_now=True, null=True, blank=True)
published = models.DateTimeField(null=True, blank=True)
is_promoted = models.BooleanField(default=True)
facebook_url = models.URLField(max_length=255, null=True, blank=True)
twitter_url = models.URLField(max_length=255, null=True, blank=True)
homepage_url = models.URLField(max_length=1024, null=True, blank=True)
uuid = models.CharField(max_length=255, null=True, blank=True)
class Meta:
ordering = ['-id']
def get_display_name(self):
return self.title or ''
def get_thumbnail(self):
return instance_get_thumbnail(self, crop=None, size='x360')
def get_thumbnail_in_primary(self):
return instance_get_thumbnail(self, size='150x150', crop=None, upscale=False)
def get_summary(self):
return truncatechars(self.summary or self.description or '', SUMMARY_MAX_LENGTH)
def get_absolute_url(self):
if hasattr(self, 'news') and self.news:
return self.news.get_absolute_url()
elif hasattr(self, 'event') and self.event:
return self.event.get_absolute_url()
return ''
def save(self, *args, **kwargs):
super(CommonCms, self).save(*args, **kwargs)
class News(CommonCms):
# relation
#organization = models.ForeignKey(Organization, related_name='news_organization', null=True, blank=True)
# Taxonomy
ARTICLE_TYPE_CHOICES = (
('news', _('News')),
('knowledge-tools', _('Knowledge & Tools')),
)
# deprecate
article_category = models.CharField(max_length=255, choices=ARTICLE_TYPE_CHOICES, default='news')
categories = models.ManyToManyField('taxonomy.ArticleCategory', null=True, blank=True, related_name='cms_categories')
tags = TagAutocompleteTagItField(max_tags=False, null=True, blank=True, max_length=2048)
files = files_widget.XFilesField(verbose_name='File Attachment', null=True, blank=True)
def get_absolute_url(self):
first_category = None
try:
first_category = self.categories.filter(level=0).first()
except (ArticleCategory.DoesNotExist, ValueError):
try:
first_category = self._categories[0]
except (AttributeError, IndexError):
pass
if first_category:
if first_category.permalink == 'news':
return reverse('news_detail', args=[self.permalink, self.id])
else:
return reverse('article_detail', args=[first_category.permalink, self.permalink, self.id])
# deprecate
if self.article_category == 'news':
return reverse('news_detail', args=[self.permalink, self.id])
else:
return reverse('article_detail', args=[self.article_category, self.permalink, self.id])
def get_files(self):
files = []
if self.files:
files = [ settings.MEDIA_URL + path for path in self.files.split('\n')]
return files
tagging.register(News, tag_descriptor_attr='tag_set')
class Event(CommonCms):
#organization = models.ForeignKey(Organization, related_name='event_organization', null=True, blank=True)
location = models.TextField(null=True, blank=True)
start_date = models.DateField(null=True, blank=True, default=timezone.now)
end_date = models.DateField(null=True, blank=True)
time = models.CharField(max_length=255, null=True, blank=True)
phone = models.TextField(null=True, blank=True)
email = models.EmailField(
max_length=255,
null=True,
blank=True
)
tags = TagAutocompleteTagItField(max_tags=False, null=True, blank=True, max_length=2048)
def get_absolute_url(self):
return reverse('event_detail', args=[self.permalink, self.id])
def get_phones(self):
return [phone.strip() for phone in self.phone.split(',')]
tagging.register(Event, tag_descriptor_attr='tag_set')
| [
"django.db.models.DateField",
"django.template.defaultfilters.truncatechars",
"django.db.models.TextField",
"django.db.models.IntegerField",
"ckeditor.fields.RichTextField",
"django.core.urlresolvers.reverse",
"files_widget.ImageField",
"django.db.models.ForeignKey",
"django.db.models.DateTimeField"... | [((4629, 4682), 'tagging.register', 'tagging.register', (['News'], {'tag_descriptor_attr': '"""tag_set"""'}), "(News, tag_descriptor_attr='tag_set')\n", (4645, 4682), False, 'import tagging\n'), ((5520, 5574), 'tagging.register', 'tagging.register', (['Event'], {'tag_descriptor_attr': '"""tag_set"""'}), "(Event, tag_descriptor_attr='tag_set')\n", (5536, 5574), False, 'import tagging\n'), ((878, 910), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (894, 910), False, 'from django.db import models\n'), ((923, 969), 'files_widget.ImageField', 'files_widget.ImageField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (946, 969), False, 'import files_widget\n'), ((984, 1023), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1000, 1023), False, 'from django.db import models\n'), ((1042, 1078), 'ckeditor.fields.RichTextField', 'RichTextField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1055, 1078), False, 'from ckeditor.fields import RichTextField\n'), ((1092, 1186), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""taxonomy.Topic"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""cms_topics"""'}), "('taxonomy.Topic', null=True, blank=True,\n related_name='cms_topics')\n", (1114, 1186), False, 'from django.db import models\n'), ((1206, 1267), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Party'], {'related_name': '"""cms_party_created_by"""'}), "(Party, related_name='cms_party_created_by')\n", (1223, 1267), False, 'from django.db import models\n'), ((1285, 1345), 'django.db.models.ForeignKey', 'models.ForeignKey', (['USER_MODEL'], {'related_name': '"""cms_created_by"""'}), "(USER_MODEL, related_name='cms_created_by')\n", (1302, 1345), False, 'from django.db import models\n'), ((1365, 1454), 
'django.db.models.ForeignKey', 'models.ForeignKey', (['USER_MODEL'], {'related_name': '"""cms_published_by"""', 'null': '(True)', 'blank': '(True)'}), "(USER_MODEL, related_name='cms_published_by', null=True,\n blank=True)\n", (1382, 1454), False, 'from django.db import models\n'), ((1465, 1510), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'STATUS_PUBLISHED'}), '(default=STATUS_PUBLISHED)\n', (1484, 1510), False, 'from django.db import models\n'), ((1525, 1587), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'null': '(True)', 'blank': '(True)'}), '(auto_now_add=True, null=True, blank=True)\n', (1545, 1587), False, 'from django.db import models\n'), ((1602, 1660), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)', 'blank': '(True)'}), '(auto_now=True, null=True, blank=True)\n', (1622, 1660), False, 'from django.db import models\n'), ((1677, 1720), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1697, 1720), False, 'from django.db import models\n'), ((1740, 1773), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1759, 1773), False, 'from django.db import models\n'), ((1794, 1848), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, null=True, blank=True)\n', (1809, 1848), False, 'from django.db import models\n'), ((1867, 1921), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, null=True, blank=True)\n', (1882, 1921), False, 'from django.db import models\n'), ((1941, 1996), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(1024)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1024, null=True, blank=True)\n', (1956, 
1996), False, 'from django.db import models\n'), ((2009, 2064), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, null=True, blank=True)\n', (2025, 2064), False, 'from django.db import models\n'), ((3181, 3259), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'choices': 'ARTICLE_TYPE_CHOICES', 'default': '"""news"""'}), "(max_length=255, choices=ARTICLE_TYPE_CHOICES, default='news')\n", (3197, 3259), False, 'from django.db import models\n'), ((3278, 3386), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""taxonomy.ArticleCategory"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""cms_categories"""'}), "('taxonomy.ArticleCategory', null=True, blank=True,\n related_name='cms_categories')\n", (3300, 3386), False, 'from django.db import models\n'), ((3395, 3481), 'tagging_autocomplete_tagit.models.TagAutocompleteTagItField', 'TagAutocompleteTagItField', ([], {'max_tags': '(False)', 'null': '(True)', 'blank': '(True)', 'max_length': '(2048)'}), '(max_tags=False, null=True, blank=True, max_length\n =2048)\n', (3420, 3481), False, 'from tagging_autocomplete_tagit.models import TagAutocompleteTagItField\n'), ((3490, 3569), 'files_widget.XFilesField', 'files_widget.XFilesField', ([], {'verbose_name': '"""File Attachment"""', 'null': '(True)', 'blank': '(True)'}), "(verbose_name='File Attachment', null=True, blank=True)\n", (3514, 3569), False, 'import files_widget\n'), ((4835, 4874), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (4851, 4874), False, 'from django.db import models\n'), ((4892, 4953), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)', 'blank': '(True)', 'default': 'timezone.now'}), '(null=True, blank=True, default=timezone.now)\n', (4908, 4953), False, 'from django.db import models\n'), ((4969, 5008), 
'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (4985, 5008), False, 'from django.db import models\n'), ((5020, 5075), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, null=True, blank=True)\n', (5036, 5075), False, 'from django.db import models\n'), ((5088, 5127), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (5104, 5127), False, 'from django.db import models\n'), ((5140, 5196), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, null=True, blank=True)\n', (5157, 5196), False, 'from django.db import models\n'), ((5239, 5325), 'tagging_autocomplete_tagit.models.TagAutocompleteTagItField', 'TagAutocompleteTagItField', ([], {'max_tags': '(False)', 'null': '(True)', 'blank': '(True)', 'max_length': '(2048)'}), '(max_tags=False, null=True, blank=True, max_length\n =2048)\n', (5264, 5325), False, 'from tagging_autocomplete_tagit.models import TagAutocompleteTagItField\n'), ((2221, 2273), 'common.functions.instance_get_thumbnail', 'instance_get_thumbnail', (['self'], {'crop': 'None', 'size': '"""x360"""'}), "(self, crop=None, size='x360')\n", (2243, 2273), False, 'from common.functions import instance_get_thumbnail\n'), ((2330, 2400), 'common.functions.instance_get_thumbnail', 'instance_get_thumbnail', (['self'], {'size': '"""150x150"""', 'crop': 'None', 'upscale': '(False)'}), "(self, size='150x150', crop=None, upscale=False)\n", (2352, 2400), False, 'from common.functions import instance_get_thumbnail\n'), ((2444, 2517), 'django.template.defaultfilters.truncatechars', 'truncatechars', (["(self.summary or self.description or '')", 'SUMMARY_MAX_LENGTH'], {}), "(self.summary or self.description or '', SUMMARY_MAX_LENGTH)\n", (2457, 2517), False, 'from 
django.template.defaultfilters import truncatechars\n'), ((5370, 5425), 'django.core.urlresolvers.reverse', 'reverse', (['"""event_detail"""'], {'args': '[self.permalink, self.id]'}), "('event_detail', args=[self.permalink, self.id])\n", (5377, 5425), False, 'from django.core.urlresolvers import reverse\n'), ((3071, 3080), 'django.utils.translation.ugettext_lazy', '_', (['"""News"""'], {}), "('News')\n", (3072, 3080), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3111, 3133), 'django.utils.translation.ugettext_lazy', '_', (['"""Knowledge & Tools"""'], {}), "('Knowledge & Tools')\n", (3112, 3133), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4283, 4337), 'django.core.urlresolvers.reverse', 'reverse', (['"""news_detail"""'], {'args': '[self.permalink, self.id]'}), "('news_detail', args=[self.permalink, self.id])\n", (4290, 4337), False, 'from django.core.urlresolvers import reverse\n'), ((4371, 4456), 'django.core.urlresolvers.reverse', 'reverse', (['"""article_detail"""'], {'args': '[self.article_category, self.permalink, self.id]'}), "('article_detail', args=[self.article_category, self.permalink, self.id]\n )\n", (4378, 4456), False, 'from django.core.urlresolvers import reverse\n'), ((4019, 4073), 'django.core.urlresolvers.reverse', 'reverse', (['"""news_detail"""'], {'args': '[self.permalink, self.id]'}), "('news_detail', args=[self.permalink, self.id])\n", (4026, 4073), False, 'from django.core.urlresolvers import reverse\n'), ((4115, 4202), 'django.core.urlresolvers.reverse', 'reverse', (['"""article_detail"""'], {'args': '[first_category.permalink, self.permalink, self.id]'}), "('article_detail', args=[first_category.permalink, self.permalink,\n self.id])\n", (4122, 4202), False, 'from django.core.urlresolvers import reverse\n')] |
from ..util.NameSearchDto import NameSearchDto
from ..util.envNames import VERSION, UPLOAD_FOLDER, path
from ..service.LogService import updateDelete, saveLog, getByPublicId
from ..service.languageBuilder import LanguageBuilder
from ..service.CreateDocumentHandler import getCreatorDocumentHandler
from ..util.RequestEvaluator import RequestEvaluator
from ..util.fileUtils import giveFileNameUnique
from ..util.anonymizationFunctions import encode,markInHtml,disintegration,dataObfuscation
from app.main.service.personalDataSearch import PersonalData
from flask import request, send_from_directory
from flask_restplus import Resource
import os
api = NameSearchDto.api
# Load the language model once at import time, before any connection is
# served, and register the name-detection rules it uses.
lb = LanguageBuilder()
lb.defineRulesOfNames()
@api.route("/")
class Index(Resource):
    """Root resource of the API."""

    @api.doc('initial operation')
    def get(self):
        """Return a short banner string identifying the service."""
        banner = "Name Search web Service"
        return banner
@api.route("/version")
class Version(Resource):
    """Resource exposing the running API version."""

    @api.doc('show the api version')
    def get(self):
        """Return the service version as a one-key mapping."""
        return dict(version=VERSION)
def registerOperation(evaluator: RequestEvaluator, function:classmethod, nameOperation:str, personalData: PersonalData):
    """Log the uploaded document, run an anonymization operation on it, and
    log the produced file.

    :param evaluator: RequestEvaluator holding the validated request data
    :param function: the anonymization callable applied to the document
    :param nameOperation: prefix used to name the produced file
    :param personalData: the type of personal data to search for
    :return: dict with the public id of the produced file and its file type
    """
    def _log(filename):
        # Insert a log row for `filename` and return its public id.
        return saveLog(
            {
                'name' : filename,
                'folder' : path,
                'isdelete': False,
                'filetype': evaluator.filetype
            }
        )

    uploadId = _log(evaluator.fakeFilename)
    producedName = '%s_%s' % (nameOperation, evaluator.fakeFilename)
    creator = getCreatorDocumentHandler(
        os.path.join(path, evaluator.fakeFilename),
        evaluator.filetype,
        os.path.join(path, producedName),
        function
    )
    handler = creator.create()
    handler.documentsProcessing(personalData)
    updateDelete(uploadId, True)
    return {"id": _log(producedName), "fileType": evaluator.filetype}
@api.route("/file/encode")
@api.param('personalData', 'type of personal data to be extracted from the document')
class Anonimization(Resource):
    """Endpoint that anonymizes (encodes) personal data in an uploaded file."""

    @api.doc(
        'returns an id that indicates to the file sent with \
        the anonymized data in the database and the type of the file'
    )
    def post(self):
        """Validate the request, then run the encode operation on the file."""
        evaluator = RequestEvaluator(request)
        if not evaluator.isRequestSuccesfull():
            return evaluator.giveResponse(), 400
        return registerOperation(evaluator,encode, "anonymized",evaluator.personalData)
@api.route("/file/disintegration")
@api.param('personalData', 'type of personal data to be extracted from the document')
class Disintegration(Resource):
    """Endpoint that removes (disintegrates) personal data in an uploaded file."""

    @api.doc(
        'returns an id that indicates to the file sent with \
        the disintegrated data in the database and the type of the file'
    )
    def post(self):
        """Validate the request, then run the disintegration operation."""
        evaluator = RequestEvaluator(request)
        if not evaluator.isRequestSuccesfull():
            return evaluator.giveResponse(), 400
        return registerOperation(evaluator,disintegration,"disintegrated",evaluator.personalData)
@api.route("/file/obfuscation")
@api.param('personalData', 'type of personal data to be extracted from the document')
class Obfuscation(Resource):
    """Endpoint that obfuscates personal data in an uploaded file."""

    @api.doc(
        'returns an id that indicates to the file sent with \
        the obfuscated data in the database and the type of the file'
    )
    def post(self):
        """Validate the request, then run the obfuscation operation."""
        evaluator = RequestEvaluator(request)
        if not evaluator.isRequestSuccesfull():
            return evaluator.giveResponse(), 400
        return registerOperation(evaluator,dataObfuscation,"obfuscated", evaluator.personalData)
@api.route('/file/extract-data/json')
@api.param('personalData', 'type of personal data to be extracted from the document')
class extractDataJson(Resource):
    """Endpoint that extracts personal data and returns it inline as JSON."""

    @api.doc('returns a json object with the requested data')
    def post(self):
        """Extract names/id cards from the uploaded file and return them."""
        evaluator = RequestEvaluator(request)
        if not evaluator.isRequestSuccesfull():
            return evaluator.giveResponse(), 400
        logId = saveLog(
            {
                'name' : evaluator.fakeFilename,
                'folder' : path,
                'isdelete': False,
                'filetype': evaluator.filetype
            }
        )
        creator = getCreatorDocumentHandler(
            os.path.join(path, evaluator.fakeFilename),
            evaluator.filetype
        )
        handler = creator.create()
        names, idCards = handler.extractData(evaluator.personalData)
        updateDelete(logId, True)
        return {
            "error" : None,
            "success": True,
            "Names" : names,
            "IdCards": idCards
        }
@api.route('/file/extract-data/json-file')
@api.param('personalData', 'type of personal data to be extracted from the document')
class extractDataJsonFile(Resource):
    """Endpoint that extracts personal data into a downloadable JSON file."""

    @api.doc('returns a json file with the requested data')
    def post(self):
        """Extract data from the uploaded file and store it as a JSON document."""
        evaluator = RequestEvaluator(request)
        if not evaluator.isRequestSuccesfull():
            return evaluator.giveResponse(), 400
        uploadId = saveLog(
            {
                'name': evaluator.fakeFilename,
                'folder': path,
                'isdelete': False,
                'filetype': evaluator.filetype
            }
        )
        jsonName = evaluator.fakeFilename.replace('.' + evaluator.filetype, ".json")
        creator = getCreatorDocumentHandler(
            os.path.join(path, evaluator.fakeFilename),
            evaluator.filetype,
            os.path.join(path, jsonName)
        )
        handler = creator.create()
        handler.createDataJsonFile(evaluator.personalData)
        updateDelete(uploadId, True)
        resultId = saveLog(
            {
                'name': jsonName,
                'folder': path,
                'isdelete': False,
                'filetype': 'json'
            }
        )
        return {"id":resultId, "fileType":'json'}
@api.route('/file/extract-data/csv')
@api.param('personalData', 'type of personal data to be extracted from the document')
class extractDataCsv(Resource):
    """Endpoint that extracts personal data into a downloadable CSV file."""

    @api.doc('returns a csv file with the requested data')
    def post(self):
        """Extract data from the uploaded file and store it as a CSV document."""
        evaluator = RequestEvaluator(request)
        if not evaluator.isRequestSuccesfull():
            return evaluator.giveResponse(), 400
        uploadId = saveLog(
            {
                'name': evaluator.fakeFilename,
                'folder': path,
                'isdelete': False,
                'filetype': evaluator.filetype
            }
        )
        csvName = evaluator.fakeFilename.replace('.' + evaluator.filetype, "_ext.csv")
        creator = getCreatorDocumentHandler(
            os.path.join(path, evaluator.fakeFilename),
            evaluator.filetype,
            os.path.join(path, csvName)
        )
        handler = creator.create()
        handler.createDataCsvFile(evaluator.personalData)
        updateDelete(uploadId, True)
        resultId = saveLog(
            {
                'name': csvName,
                'folder': path,
                'isdelete': False,
                'filetype': 'csv'
            }
        )
        return {"id":resultId, "fileType":'csv'}
@api.route('/file/download')
@api.param('id', 'public id for a document')
class getDocument(Resource):
    """Endpoint that serves a previously generated document by its public id."""

    @api.doc('return a document using your public id')
    def get(self):
        """Send the stored file as an attachment and mark its log row deleted.

        Returns a 400 error payload when no document matches the given id.
        """
        publicId = str(request.args['id'])
        # BUG FIX: renamed the misspelled local `docuemnt` and corrected the
        # "documento" typo in the user-facing error message.
        document = getByPublicId(publicId)
        if not document:
            return {"error": "the document with id %s does not exist" % (publicId)}, 400
        fileSend = send_from_directory(document.folder, document.name, as_attachment=True)
        updateDelete(publicId, True)
        return fileSend
@api.route('/file/operation-web')
@api.param('url', 'Url form a web site')
@api.param('op', 'Operation to a html file')
class operationWeb(Resource):
    """Endpoint that applies any of the service operations to a remote web page."""

    def _dataFile(self, url: str, filetype: str, personalData: PersonalData):
        # Shared implementation behind _json/_csv (their bodies were
        # duplicated): fetch the page at `url`, write the extracted data to a
        # file of `filetype`, log it and return its public id.
        name = giveFileNameUnique(filetype)
        creator = getCreatorDocumentHandler(
            url,
            'html',
            os.path.join(path, name),
            isUrl=True
        )
        try:
            dh = creator.create()
            if filetype == 'json':
                dh.createDataJsonFile(personalData)
            else:
                dh.createDataCsvFile(personalData)
            publicId = saveLog(
                {
                    'name' : name,
                    'folder' : path,
                    'isdelete': False,
                    'filetype': filetype
                }
            )
            return {"id": publicId, "fileType": filetype}
        except Exception:
            return {
                "url": url,
                "success" : False,
                "error" : "bad url"
            }, 400

    def _json(self, url: str, personalData: PersonalData):
        """Extract data from `url` into a JSON file; return its public id."""
        return self._dataFile(url, 'json', personalData)

    def _csv(self, url: str, personalData: PersonalData):
        """Extract data from `url` into a CSV file; return its public id."""
        return self._dataFile(url, 'csv', personalData)

    def _encode(self, url: str, anonymizationFunction, personalData: PersonalData):
        """Apply `anonymizationFunction` to the page at `url` and return the
        public id of the resulting HTML file."""
        name = giveFileNameUnique('html')
        creator = getCreatorDocumentHandler(
            url,
            'html',
            os.path.join(path, name),
            anonymizationFunction,
            isUrl=True
        )
        try:
            dh = creator.create()
            dh.documentsProcessing(personalData)
            publicId = saveLog(
                {
                    'name' : name,
                    'folder' : path,
                    'isdelete': False,
                    'filetype': 'html'
                }
            )
            return {"id": publicId, "fileType": 'html'}
        except Exception:
            # NOTE(review): this path returns 500 while _json/_csv return 400
            # for the same "bad url" condition — confirm whether intentional.
            return {
                "url": url,
                "success" : False,
                "error" : "bad url"
            }, 500

    @api.doc(
        'depending on the sent parameters it performs some of the \
        allowed operations of the service for a web page in html format'
    )
    def get(self):
        """Dispatch on the `op` query parameter to the matching operation."""
        typeData = str(request.args['personalData'])
        if typeData == "names":
            personalData = PersonalData.names
        elif typeData == "idCards":
            personalData = PersonalData.idCards
        elif typeData == "all":
            personalData = PersonalData.all
        else:
            return {
                "success" : False,
                "error" : "type of personal data incorrect"
            }, 400
        url = str(request.args['url'])
        op = str(request.args['op'])
        if op == 'csv':
            return self._csv(url, personalData)
        elif op == 'json':
            return self._json(url, personalData)
        elif op == 'encode':
            return self._encode(url, encode, personalData)
        # 'ofuscation' and 'disgergation' are kept misspelled on purpose:
        # they are part of the public query API that clients already use.
        elif op == 'ofuscation':
            return self._encode(url, dataObfuscation, personalData)
        elif op == 'disgergation':
            return self._encode(url, disintegration, personalData)
        elif op == 'target':
            return self._encode(url, markInHtml, personalData)
        # BUG FIX: the fall-through error previously reported the URL under
        # the "op" key ("op": url); report the offending operation instead.
        return {
            "op" : op,
            "success" : False,
            "error" : "bad operation"
        }, 400
| [
"flask.send_from_directory",
"os.path.join"
] | [((2060, 2102), 'os.path.join', 'os.path.join', (['path', 'evaluator.fakeFilename'], {}), '(path, evaluator.fakeFilename)\n', (2072, 2102), False, 'import os\n'), ((2140, 2177), 'os.path.join', 'os.path.join', (['path', 'nameOfNewDocument'], {}), '(path, nameOfNewDocument)\n', (2152, 2177), False, 'import os\n'), ((8550, 8621), 'flask.send_from_directory', 'send_from_directory', (['docuemnt.folder', 'docuemnt.name'], {'as_attachment': '(True)'}), '(docuemnt.folder, docuemnt.name, as_attachment=True)\n', (8569, 8621), False, 'from flask import request, send_from_directory\n'), ((9158, 9182), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (9170, 9182), False, 'import os\n'), ((9982, 10006), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (9994, 10006), False, 'import os\n'), ((10814, 10838), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (10826, 10838), False, 'import os\n'), ((4942, 4984), 'os.path.join', 'os.path.join', (['path', 'evaluator.fakeFilename'], {}), '(path, evaluator.fakeFilename)\n', (4954, 4984), False, 'import os\n'), ((6206, 6248), 'os.path.join', 'os.path.join', (['path', 'evaluator.fakeFilename'], {}), '(path, evaluator.fakeFilename)\n', (6218, 6248), False, 'import os\n'), ((6302, 6339), 'os.path.join', 'os.path.join', (['path', 'nameOfNewDocument'], {}), '(path, nameOfNewDocument)\n', (6314, 6339), False, 'import os\n'), ((7600, 7642), 'os.path.join', 'os.path.join', (['path', 'evaluator.fakeFilename'], {}), '(path, evaluator.fakeFilename)\n', (7612, 7642), False, 'import os\n'), ((7696, 7733), 'os.path.join', 'os.path.join', (['path', 'nameOfNewDocument'], {}), '(path, nameOfNewDocument)\n', (7708, 7733), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Test Tango device server for use with scaling tests."""
import argparse
import time
import tango
def delete_server():
    """Remove the TestDeviceServer/1 registration from the Tango DB, if present."""
    db = tango.Database()
    server = 'TestDeviceServer/1'
    if server in list(db.get_server_list(server)):
        started = time.time()
        db.delete_server('TestDeviceServer/1')
        print('- Delete server: {:.4f} s'.format(time.time() - started))
def list_devices():
    """Print counts of registered and of currently exported test devices."""
    db = tango.Database()
    server_instance = 'TestDeviceServer/1'
    # Registered devices of both test classes, counted together.
    registered = list(db.get_device_name(server_instance, 'TestDevice1'))
    registered += list(db.get_device_name(server_instance, 'TestDevice2'))
    print('- No. registered devices: {}'.format(len(registered)))
    exported_devices = list(db.get_device_exported('test/*'))
    print('- No. running devices: {}'.format(len(exported_devices)))
def register(num_devices):
    """Register `num_devices` TestDevice1 devices and report the timing.

    :param num_devices: number of devices to add to the Tango database.
    """
    # BUG FIX: guard against num_devices <= 0, which previously raised
    # ZeroDivisionError in the per-device timing below (the count is a
    # user-supplied command-line argument).
    if num_devices <= 0:
        return
    db = tango.Database()
    device_info = tango.DbDevInfo()
    device_info.server = 'TestDeviceServer/1'
    device_info._class = 'TestDevice1'
    start_time = time.time()
    for device_id in range(num_devices):
        device_info.name = 'test/test_device/{:05d}'.format(device_id)
        db.add_device(device_info)
    elapsed = time.time() - start_time
    print('- Register devices: {:.4f} s ({:.4f} s/device)'
          .format(elapsed, elapsed / num_devices))
def main():
    """Command-line entry point: re-register N test devices and report timings."""
    parser = argparse.ArgumentParser(description='Device registration time.')
    parser.add_argument('num_devices', metavar='N', type=int, default=1,
                        nargs='?', help='Number of devices to start.')
    count = parser.parse_args().num_devices
    delete_server()
    print('* Registering {} devices'.format(count))
    register(count)
    list_devices()


if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"time.time",
"tango.DbDevInfo",
"tango.Database"
] | [((192, 208), 'tango.Database', 'tango.Database', ([], {}), '()\n', (206, 208), False, 'import tango\n'), ((525, 541), 'tango.Database', 'tango.Database', ([], {}), '()\n', (539, 541), False, 'import tango\n'), ((1050, 1066), 'tango.Database', 'tango.Database', ([], {}), '()\n', (1064, 1066), False, 'import tango\n'), ((1085, 1102), 'tango.DbDevInfo', 'tango.DbDevInfo', ([], {}), '()\n', (1100, 1102), False, 'import tango\n'), ((1207, 1218), 'time.time', 'time.time', ([], {}), '()\n', (1216, 1218), False, 'import time\n'), ((1554, 1618), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Device registration time."""'}), "(description='Device registration time.')\n", (1577, 1618), False, 'import argparse\n'), ((347, 358), 'time.time', 'time.time', ([], {}), '()\n', (356, 358), False, 'import time\n'), ((1380, 1391), 'time.time', 'time.time', ([], {}), '()\n', (1389, 1391), False, 'import time\n'), ((455, 466), 'time.time', 'time.time', ([], {}), '()\n', (464, 466), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 27 15:16:34 2021
@author: ag
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.transforms as transforms
import re
def confidence_ellipse(x, y, n_std=1.0, weights=None, ax=None, facecolor='none', **kwargs):
    """
    Create a plot of the covariance confidence ellipse of *x* and *y*.

    Parameters
    ----------
    x, y : array-like, shape (n, )
        Input data.
    n_std : float
        The number of standard deviations to determine the ellipse's radiuses.
    weights : array-like, optional
        Per-point weights used for the weighted covariance and means.
    ax : matplotlib.axes.Axes, optional
        The axes object to draw the ellipse into; defaults to ``plt.gca()``.
    facecolor : str
        Base colour of the ellipse; a '22' alpha suffix is appended for the
        fill when it is a real colour.
    **kwargs
        Forwarded to `~matplotlib.patches.Ellipse`

    Returns
    -------
    matplotlib.patches.Ellipse
    """
    if x.size != y.size:
        raise ValueError("x and y must be the same size")
    if not ax:
        ax = plt.gca()
    if 'label' in kwargs:
        # Pretty-print scenario labels, e.g. 'SSP126' -> 'SSP1-2.6'.
        kwargs['label'] = re.sub(r'SSP(\d)(\d)(\d)',r'SSP\1-\2.\3', kwargs['label'])
    if weights is None:
        cov = np.cov(x, y)
        mean_x = np.mean(x)
        mean_y = np.mean(y)
    else:
        cov = np.cov(x, y, aweights = weights)
        sumw = np.sum(weights)
        mean_x = np.sum(x*weights)/sumw
        mean_y = np.sum(y*weights)/sumw
    pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])
    # Using a special case to obtain the eigenvalues of this
    # two-dimensionl dataset.
    ell_radius_x = np.sqrt(1 + pearson)
    ell_radius_y = np.sqrt(1 - pearson)
    # BUG FIX: with the default facecolor='none', appending the '22' alpha
    # suffix produced the invalid colour string 'none22' and matplotlib
    # raised at draw time.  Only append the suffix to real colours.
    fill = facecolor if facecolor == 'none' else f'{facecolor}22'
    ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,
                      facecolor=fill, edgecolor=f'{facecolor}', **kwargs)
    # Calculating the stdandard deviation of x from
    # the squareroot of the variance and multiplying
    # with the given number of standard deviations.
    scale_x = np.sqrt(cov[0, 0]) * n_std
    # calculating the stdandard deviation of y ...
    scale_y = np.sqrt(cov[1, 1]) * n_std
    transf = transforms.Affine2D() \
        .rotate_deg(45) \
        .scale(scale_x, scale_y) \
        .translate(mean_x, mean_y)
    ellipse.set_transform(transf + ax.transData)
    return ax.add_patch(ellipse)
#FROM https://stackoverflow.com/questions/21844024/weighted-percentile-using-numpy
def weighted_quantile(values, quantiles, sample_weight=None,
                      values_sorted=False, old_style=False):
    """Compute quantiles of *values*, honouring optional per-sample weights.

    Very close to numpy.percentile, but supports weights.
    NOTE: quantiles should be in [0, 1]!

    :param values: numpy.array with data
    :param quantiles: array-like with many quantiles needed
    :param sample_weight: array-like of the same length as `values`;
        defaults to uniform weights
    :param values_sorted: bool, if True, skip sorting of the initial array
    :param old_style: if True, rescale the weighted CDF so the output is
        consistent with numpy.percentile
    :return: numpy.array with computed quantiles
    """
    values = np.array(values)
    quantiles = np.array(quantiles)
    assert np.all(quantiles >= 0) and np.all(quantiles <= 1), \
        'quantiles should be in [0, 1]'
    if sample_weight is None:
        sample_weight = np.ones(len(values))
    sample_weight = np.array(sample_weight)
    if not values_sorted:
        order = np.argsort(values)
        values = values[order]
        sample_weight = sample_weight[order]
    # Midpoint rule: each sample sits at the centre of its weight interval.
    cdf = np.cumsum(sample_weight) - 0.5 * sample_weight
    if old_style:
        # Rescale to [0, 1] to be convenient with numpy.percentile.
        cdf -= cdf[0]
        cdf /= cdf[-1]
    else:
        cdf /= np.sum(sample_weight)
    return np.interp(quantiles, cdf, values)
| [
"numpy.mean",
"numpy.all",
"numpy.sqrt",
"matplotlib.pyplot.gca",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.cov",
"matplotlib.transforms.Affine2D",
"numpy.cumsum",
"numpy.interp",
"re.sub",
"matplotlib.patches.Ellipse"
] | [((1466, 1486), 'numpy.sqrt', 'np.sqrt', (['(1 + pearson)'], {}), '(1 + pearson)\n', (1473, 1486), True, 'import numpy as np\n'), ((1506, 1526), 'numpy.sqrt', 'np.sqrt', (['(1 - pearson)'], {}), '(1 - pearson)\n', (1513, 1526), True, 'import numpy as np\n'), ((1541, 1674), 'matplotlib.patches.Ellipse', 'Ellipse', (['(0, 0)'], {'width': '(ell_radius_x * 2)', 'height': '(ell_radius_y * 2)', 'facecolor': 'f"""{facecolor}22"""', 'edgecolor': 'f"""{facecolor}"""'}), "((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2, facecolor=\n f'{facecolor}22', edgecolor=f'{facecolor}', **kwargs)\n", (1548, 1674), False, 'from matplotlib.patches import Ellipse\n'), ((2940, 2956), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (2948, 2956), True, 'import numpy as np\n'), ((2973, 2992), 'numpy.array', 'np.array', (['quantiles'], {}), '(quantiles)\n', (2981, 2992), True, 'import numpy as np\n'), ((3088, 3111), 'numpy.array', 'np.array', (['sample_weight'], {}), '(sample_weight)\n', (3096, 3111), True, 'import numpy as np\n'), ((3675, 3723), 'numpy.interp', 'np.interp', (['quantiles', 'weighted_quantiles', 'values'], {}), '(quantiles, weighted_quantiles, values)\n', (3684, 3723), True, 'import numpy as np\n'), ((902, 911), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (909, 911), True, 'import matplotlib.pyplot as plt\n'), ((965, 1028), 're.sub', 're.sub', (['"""SSP(\\\\d)(\\\\d)(\\\\d)"""', '"""SSP\\\\1-\\\\2.\\\\3"""', "kwargs['label']"], {}), "('SSP(\\\\d)(\\\\d)(\\\\d)', 'SSP\\\\1-\\\\2.\\\\3', kwargs['label'])\n", (971, 1028), False, 'import re\n'), ((1063, 1075), 'numpy.cov', 'np.cov', (['x', 'y'], {}), '(x, y)\n', (1069, 1075), True, 'import numpy as np\n'), ((1093, 1103), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1100, 1103), True, 'import numpy as np\n'), ((1121, 1131), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1128, 1131), True, 'import numpy as np\n'), ((1156, 1186), 'numpy.cov', 'np.cov', (['x', 'y'], {'aweights': 'weights'}), '(x, 
y, aweights=weights)\n', (1162, 1186), True, 'import numpy as np\n'), ((1204, 1219), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (1210, 1219), True, 'import numpy as np\n'), ((1325, 1355), 'numpy.sqrt', 'np.sqrt', (['(cov[0, 0] * cov[1, 1])'], {}), '(cov[0, 0] * cov[1, 1])\n', (1332, 1355), True, 'import numpy as np\n'), ((1864, 1882), 'numpy.sqrt', 'np.sqrt', (['cov[0, 0]'], {}), '(cov[0, 0])\n', (1871, 1882), True, 'import numpy as np\n'), ((1957, 1975), 'numpy.sqrt', 'np.sqrt', (['cov[1, 1]'], {}), '(cov[1, 1])\n', (1964, 1975), True, 'import numpy as np\n'), ((3123, 3145), 'numpy.all', 'np.all', (['(quantiles >= 0)'], {}), '(quantiles >= 0)\n', (3129, 3145), True, 'import numpy as np\n'), ((3150, 3172), 'numpy.all', 'np.all', (['(quantiles <= 1)'], {}), '(quantiles <= 1)\n', (3156, 3172), True, 'import numpy as np\n'), ((3260, 3278), 'numpy.argsort', 'np.argsort', (['values'], {}), '(values)\n', (3270, 3278), True, 'import numpy as np\n'), ((3383, 3407), 'numpy.cumsum', 'np.cumsum', (['sample_weight'], {}), '(sample_weight)\n', (3392, 3407), True, 'import numpy as np\n'), ((3642, 3663), 'numpy.sum', 'np.sum', (['sample_weight'], {}), '(sample_weight)\n', (3648, 3663), True, 'import numpy as np\n'), ((1237, 1256), 'numpy.sum', 'np.sum', (['(x * weights)'], {}), '(x * weights)\n', (1243, 1256), True, 'import numpy as np\n'), ((1277, 1296), 'numpy.sum', 'np.sum', (['(y * weights)'], {}), '(y * weights)\n', (1283, 1296), True, 'import numpy as np\n'), ((1998, 2019), 'matplotlib.transforms.Affine2D', 'transforms.Affine2D', ([], {}), '()\n', (2017, 2019), True, 'import matplotlib.transforms as transforms\n')] |
import re
from flask import request
from flask_wtf import FlaskForm
from flask_wtf.file import FileAllowed, FileField
from wtforms import (HiddenField, IntegerField, SelectField, StringField,
SubmitField, TextAreaField)
from wtforms.validators import (DataRequired, Length, NumberRange, Optional,
ValidationError)
from .models import User
class SearchForm(FlaskForm):
    """Search box form; reads its data from the query string, without CSRF."""
    q = StringField('Search', validators=[DataRequired()])

    def __init__(self, *args, **kwargs):
        # GET searches carry their data in request.args and need no CSRF token;
        # only supply these defaults when the caller did not.
        kwargs.setdefault('formdata', request.args)
        kwargs.setdefault('csrf_enabled', False)
        super(SearchForm, self).__init__(*args, **kwargs)
def is_isbn_10(form, fieldname):
    """Return True when the named form field holds a checksum-valid ISBN-10.

    Hyphens, dashes and whitespace are stripped first; a trailing 'X'/'x'
    stands for the value 10.  The value is valid when the weighted digit sum
    (position * digit over the first nine digits) modulo 11 equals the
    check digit.
    """
    raw = form.data.get(fieldname)
    digits = list(re.sub(r"[-–—\s]", "", raw))
    if len(digits) != 10:
        return False
    if digits[-1] in ("X", "x"):
        digits[-1] = 10
    total = sum((pos + 1) * int(ch) for pos, ch in enumerate(digits[:-1]))
    return total % 11 == int(digits[-1])
def is_isbn_13(form, fieldname):
    """Return True when the named form field holds a valid ISBN-13.

    Separators (hyphens, dashes, whitespace) are stripped first.  The value
    must be 13 characters long, start with the 978 or 979 bookland prefix,
    and satisfy the ISBN-13 checksum: digits weighted alternately 1 and 3
    must sum to a multiple of 10.
    """
    _sum = 0
    isbn_val = form.data.get(fieldname)
    isbn = re.sub(r"[-–—\s]", "", isbn_val)
    checksum_passed = False
    # BUG FIX: the original guard `len(isbn) == 13 and isbn[0:3] == "978" or "979"`
    # was always truthy ("979" is a non-empty string), so values of any length
    # or prefix reached the checksum loop, and non-numeric input (e.g. an
    # ISBN-10 ending in 'X') crashed with ValueError.
    if len(isbn) == 13 and isbn[0:3] in ("978", "979"):
        for d, i in enumerate(isbn):
            if d % 2 == 0:
                _sum += int(i)
            else:
                _sum += int(i) * 3
        checksum_passed = _sum % 10 == 0
    return checksum_passed
def isbn_10_validator(form, field):
    """WTForms inline validator: raise ValidationError unless `isbn_10` is valid."""
    if is_isbn_10(form, 'isbn_10'):
        return True
    raise ValidationError('Sorry, is NOT a valid ISBN 10')
def isbn_13_validator(form, field):
    """WTForms inline validator: raise ValidationError unless `isbn_13` is valid."""
    if is_isbn_13(form, 'isbn_13'):
        return True
    raise ValidationError('Sorry, is NOT a valid ISBN 13')
def isbn_validator(form, field):
    """WTForms inline validator for the `isbn` field: accepts ISBN-10 or ISBN-13."""
    if is_isbn_13(form, 'isbn') or is_isbn_10(form, 'isbn'):
        return True
    raise ValidationError('Sorry, is NOT a valid ISBN')
class AddBookForm(FlaskForm):
    """Form for manually adding a book: title, author, optional ISBNs and cover."""
    # Title and author are mandatory, capped at 140 characters.
    title = StringField(
        'Title',
        validators=[DataRequired(), Length(min=1, max=140, message='Too long')]
    )
    author = StringField(
        'Author',
        validators=[DataRequired(), Length(min=1, max=140, message='Too long')]
    )
    # ISBN fields are optional; when filled in they must pass the
    # corresponding checksum validator defined above in this module.
    isbn_10 = StringField(
        'ISBN 10 (leave blank if no ISBN)',
        validators=[Optional(), isbn_10_validator]
    )
    isbn_13 = StringField(
        'ISBN 13 (leave blank if no ISBN)',
        validators=[Optional(), isbn_13_validator]
    )
    # Optional cover upload, restricted to JPEG files.
    cover = FileField('Book cover', validators=[
        Optional(),
        FileAllowed(['jpg', 'jpeg'], '*.jpeg Images only!')
    ])
    submit = SubmitField('Add Book')
class AddBookByIsbnForm(FlaskForm):
    """Form with a single ISBN input used to look a book up by its ISBN."""
    # isbn_validator accepts either a valid ISBN-10 or a valid ISBN-13.
    isbn = StringField(
        'Find book by ISBN',
        validators=[DataRequired(), isbn_validator]
    )
    submit = SubmitField('Try to find a book by ISBN')
class AddIsbnForm(FlaskForm):
    """Form for attaching ISBN numbers to an existing book record."""
    # Both fields are optional; when filled in they must pass the checksum check.
    isbn_10 = StringField(
        'ISBN 10 (leave blank if no ISBN)',
        validators=[Optional(), isbn_10_validator]
    )
    isbn_13 = StringField(
        'ISBN 13 (leave blank if no ISBN)',
        validators=[Optional(), isbn_13_validator]
    )
    submit = SubmitField('Check book by ISBN')
class AddCoverForm(FlaskForm):
    """Form for uploading a book cover image (JPEG only)."""
    cover = FileField('Book cover', validators=[
        FileAllowed(['jpg', 'jpeg'], '*.jpeg Images only!')
    ])
    submit = SubmitField('Add Book Cover')
class EditBookInstanceForm(FlaskForm):
    """Form for editing a seller's copy of a book: price, condition, description."""
    # Price in hryvnias (₴), limited to 1..9999.
    price = IntegerField(
        'My price, ₴',
        validators=[NumberRange(min=1, max=9999, message='Invalid price')]
    )
    # Condition grades from '4' (best) down to '1' (worst); the labels are
    # user-facing Russian strings and must be kept verbatim.
    condition = SelectField(
        'The book condition',
        choices=[
            ('4', 'Идеальное'),
            ('3', 'Хорошее (читана аккуратно, без пометок и заломов) '),
            ('2', 'Удовлетворительное'),
            ('1', 'Как есть (стоит уточннить нюансы с продавцом)')],
        validators=[DataRequired()]
    )
    description = StringField('Description')
    submit = SubmitField('Submit')
class MessageForm(FlaskForm):
    """Form for sending a short (max 140 characters) message."""
    message = TextAreaField('Message', validators=[
        DataRequired(), Length(min=0, max=140)])
    submit = SubmitField('Submit')

    # NOTE(review): this form has no `username` field, so WTForms never
    # invokes this inline validator — it looks copy-pasted from a profile
    # form.  Confirm and consider removing it.
    def validate_username(self, username):
        user = User.query.filter_by(username=username.data).first()
        if user:
            raise ValidationError('Please use a different username.')
class EditProfileForm(FlaskForm):
    """Form for editing the current user's profile (name, bio, location)."""
    username = StringField('Username')
    about_me = TextAreaField('About me / alternative contact info', validators=[Length(min=0, max=140)])
    # Coordinates are hidden fields, presumably filled in client-side by a
    # map widget — TODO confirm against the template.
    latitude = HiddenField('Latitude', validators=[DataRequired()])
    longitude = HiddenField('Longitude', validators=[DataRequired()])
    submit = SubmitField('Submit')

    def __init__(self, original_username, *args, **kwargs):
        # Remember the name the user had when the form was built, so keeping
        # the same username is not rejected as "already taken".
        super(EditProfileForm, self).__init__(*args, **kwargs)
        self.original_username = original_username

    def validate_username(self, username):
        # Only check uniqueness when the name actually changed.
        if username.data != self.original_username:
            user = User.query.filter_by(username=self.username.data).first()
            if user is not None:
                raise ValidationError('Please use a different username.')
| [
"wtforms.validators.NumberRange",
"wtforms.validators.ValidationError",
"flask_wtf.file.FileAllowed",
"wtforms.SubmitField",
"wtforms.StringField",
"wtforms.validators.Optional",
"wtforms.validators.Length",
"re.sub",
"wtforms.validators.DataRequired"
] | [((850, 882), 're.sub', 're.sub', (['"""[-–—\\\\s]"""', '""""""', 'isbn_val'], {}), "('[-–—\\\\s]', '', isbn_val)\n", (856, 882), False, 'import re\n'), ((1326, 1358), 're.sub', 're.sub', (['"""[-–—\\\\s]"""', '""""""', 'isbn_val'], {}), "('[-–—\\\\s]', '', isbn_val)\n", (1332, 1358), False, 'import re\n'), ((2851, 2874), 'wtforms.SubmitField', 'SubmitField', (['"""Add Book"""'], {}), "('Add Book')\n", (2862, 2874), False, 'from wtforms import HiddenField, IntegerField, SelectField, StringField, SubmitField, TextAreaField\n'), ((3037, 3078), 'wtforms.SubmitField', 'SubmitField', (['"""Try to find a book by ISBN"""'], {}), "('Try to find a book by ISBN')\n", (3048, 3078), False, 'from wtforms import HiddenField, IntegerField, SelectField, StringField, SubmitField, TextAreaField\n'), ((3380, 3413), 'wtforms.SubmitField', 'SubmitField', (['"""Check book by ISBN"""'], {}), "('Check book by ISBN')\n", (3391, 3413), False, 'from wtforms import HiddenField, IntegerField, SelectField, StringField, SubmitField, TextAreaField\n'), ((3576, 3605), 'wtforms.SubmitField', 'SubmitField', (['"""Add Book Cover"""'], {}), "('Add Book Cover')\n", (3587, 3605), False, 'from wtforms import HiddenField, IntegerField, SelectField, StringField, SubmitField, TextAreaField\n'), ((4129, 4155), 'wtforms.StringField', 'StringField', (['"""Description"""'], {}), "('Description')\n", (4140, 4155), False, 'from wtforms import HiddenField, IntegerField, SelectField, StringField, SubmitField, TextAreaField\n'), ((4169, 4190), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (4180, 4190), False, 'from wtforms import HiddenField, IntegerField, SelectField, StringField, SubmitField, TextAreaField\n'), ((4337, 4358), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (4348, 4358), False, 'from wtforms import HiddenField, IntegerField, SelectField, StringField, SubmitField, TextAreaField\n'), ((4609, 4632), 'wtforms.StringField', 'StringField', 
(['"""Username"""'], {}), "('Username')\n", (4620, 4632), False, 'from wtforms import HiddenField, IntegerField, SelectField, StringField, SubmitField, TextAreaField\n'), ((4889, 4910), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (4900, 4910), False, 'from wtforms import HiddenField, IntegerField, SelectField, StringField, SubmitField, TextAreaField\n'), ((1756, 1804), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Sorry, is NOT a valid ISBN 10"""'], {}), "('Sorry, is NOT a valid ISBN 10')\n", (1771, 1804), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((1913, 1961), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Sorry, is NOT a valid ISBN 13"""'], {}), "('Sorry, is NOT a valid ISBN 13')\n", (1928, 1961), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((2094, 2139), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Sorry, is NOT a valid ISBN"""'], {}), "('Sorry, is NOT a valid ISBN')\n", (2109, 2139), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((4506, 4557), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Please use a different username."""'], {}), "('Please use a different username.')\n", (4521, 4557), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((467, 481), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (479, 481), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((2250, 2264), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (2262, 2264), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((2266, 2308), 'wtforms.validators.Length', 'Length', ([], {'min': '(1)', 'max': '(140)', 
'message': '"""Too long"""'}), "(min=1, max=140, message='Too long')\n", (2272, 2308), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((2380, 2394), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (2392, 2394), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((2396, 2438), 'wtforms.validators.Length', 'Length', ([], {'min': '(1)', 'max': '(140)', 'message': '"""Too long"""'}), "(min=1, max=140, message='Too long')\n", (2402, 2438), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((2537, 2547), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (2545, 2547), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((2665, 2675), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (2673, 2675), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((2759, 2769), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (2767, 2769), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((2779, 2830), 'flask_wtf.file.FileAllowed', 'FileAllowed', (["['jpg', 'jpeg']", '"""*.jpeg Images only!"""'], {}), "(['jpg', 'jpeg'], '*.jpeg Images only!')\n", (2790, 2830), False, 'from flask_wtf.file import FileAllowed, FileField\n'), ((2986, 3000), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (2998, 3000), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((3202, 3212), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (3210, 3212), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((3330, 3340), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (3338, 3340), False, 
'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((3504, 3555), 'flask_wtf.file.FileAllowed', 'FileAllowed', (["['jpg', 'jpeg']", '"""*.jpeg Images only!"""'], {}), "(['jpg', 'jpeg'], '*.jpeg Images only!')\n", (3515, 3555), False, 'from flask_wtf.file import FileAllowed, FileField\n'), ((3716, 3769), 'wtforms.validators.NumberRange', 'NumberRange', ([], {'min': '(1)', 'max': '(9999)', 'message': '"""Invalid price"""'}), "(min=1, max=9999, message='Invalid price')\n", (3727, 3769), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((4089, 4103), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (4101, 4103), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((4283, 4297), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (4295, 4297), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((4299, 4321), 'wtforms.validators.Length', 'Length', ([], {'min': '(0)', 'max': '(140)'}), '(min=0, max=140)\n', (4305, 4321), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((4713, 4735), 'wtforms.validators.Length', 'Length', ([], {'min': '(0)', 'max': '(140)'}), '(min=0, max=140)\n', (4719, 4735), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((4789, 4803), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (4801, 4803), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((4859, 4873), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (4871, 4873), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n'), ((5314, 5365), 'wtforms.validators.ValidationError', 'ValidationError', 
(['"""Please use a different username."""'], {}), "('Please use a different username.')\n", (5329, 5365), False, 'from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError\n')] |
import sqlite3

# Open (or create) the customer database in the current working directory.
connection = sqlite3.connect('customer.db')
cur = connection.cursor()

# Create the customers table. IF NOT EXISTS makes the script re-runnable:
# without it a second run raises sqlite3.OperationalError
# ("table customers already exists").
cur.execute(""" CREATE TABLE IF NOT EXISTS customers (
        first_name text,
        last_name text,
        email text
    )
""")

# Persist the DDL statement.
connection.commit()
# close our connection
connection.close() | [
"sqlite3.connect"
] | [((31, 61), 'sqlite3.connect', 'sqlite3.connect', (['"""customer.db"""'], {}), "('customer.db')\n", (46, 61), False, 'import sqlite3\n')] |
from django.contrib.auth.models import AbstractUser
from django.urls import reverse
from django.db import models
class User(AbstractUser):
    """Custom user model for the project, extending Django's AbstractUser."""

    # A single free-form name field; the usual first/last-name split does
    # not cover all naming conventions.
    name = models.CharField(verbose_name='Name of User', max_length=255, blank=True)

    class Meta:
        app_label = 'users'

    def __str__(self):
        """Users display as their username."""
        return self.username

    def get_absolute_url(self):
        """Return the canonical URL of this user's detail page."""
        url_kwargs = {'username': self.username}
        return reverse('users:detail', kwargs=url_kwargs)
| [
"django.db.models.CharField",
"django.urls.reverse"
] | [((215, 288), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Name of User"""', 'blank': '(True)', 'max_length': '(255)'}), "(verbose_name='Name of User', blank=True, max_length=255)\n", (231, 288), False, 'from django.db import models\n'), ((435, 494), 'django.urls.reverse', 'reverse', (['"""users:detail"""'], {'kwargs': "{'username': self.username}"}), "('users:detail', kwargs={'username': self.username})\n", (442, 494), False, 'from django.urls import reverse\n')] |
import sys
import numpy as np
rng = np.random.default_rng()

n_rows = 2000000
out_path = 'data/bigmixed.csv'
print(f"Generating {out_path}")

with open(out_path, 'w') as out:
    for row in range(n_rows):
        # Each record: 10 random ints, 4 random words, 8 eighth-step floats.
        ints = rng.integers(1, 1000, size=10).tolist()
        words = rng.choice(['abc', 'def', 'αβγ', 'apple', 'orange'], size=4).tolist()
        floats = (rng.integers(0, 100, size=8)/8).tolist()
        record = ints + words + floats
        out.write(','.join(f'{v}' for v in record) + '\n')
        # Progress: overwrite the same console line at each whole percent.
        pct, rem = divmod(100*(row + 1), n_rows)
        if rem == 0:
            print(f"\r{pct:3d}%", end='')
            sys.stdout.flush()
print()
| [
"sys.stdout.flush",
"numpy.random.default_rng"
] | [((39, 62), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (60, 62), True, 'import numpy as np\n'), ((723, 741), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (739, 741), False, 'import sys\n')] |
#!/usr/bin/env python3
import argparse
import re
import sys
import zipfile
import numpy as np
import bert_wrapper
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("input_conllu", type=str, help="Input CoNLL-U file")
    parser.add_argument("output_npz", type=str, help="Output NPZ file")
    parser.add_argument("--batch_size", default=16, type=int, help="Batch size")
    parser.add_argument("--casing", default=bert_wrapper.BertWrapper.CASING_UNCASED, help="Bert model casing")
    parser.add_argument("--language", default=bert_wrapper.BertWrapper.LANGUAGE_MULTILINGUAL, help="Bert model language")
    parser.add_argument("--layer_indices", default="-1,-2,-3,-4", type=str, help="Bert model layers to average")
    parser.add_argument("--size", default=bert_wrapper.BertWrapper.SIZE_BASE, help="Bert model size")
    parser.add_argument("--threads", default=4, type=int, help="Threads to use")
    parser.add_argument("--with_cls", default=False, action="store_true", help="Return also CLS embedding")
    args = parser.parse_args()
    args.layer_indices = list(map(int, args.layer_indices.split(",")))

    # Load the CoNLL-U file, collecting the FORM column (word) of every token.
    sentences = []
    with open(args.input_conllu, mode="r", encoding="utf-8") as conllu_file:
        in_sentence = False
        for line in conllu_file:
            line = line.rstrip("\n")
            # Bug fix: skip comment lines up front. The original tested
            # line.startswith("#") only as the last statement of the loop
            # body, where the `continue` had no effect, so a comment line
            # could open a new sentence before any token had been seen.
            if line.startswith("#"):
                continue
            if line:
                if not in_sentence:
                    sentences.append([])
                    in_sentence = True
                # Token lines start with a plain integer ID; multi-word
                # ranges ("1-2") are skipped. [0-9]+ instead of the
                # original [0-9]*, which also matched a line starting
                # with a bare tab.
                if re.match(r"^[0-9]+\t", line):
                    columns = line.split("\t")
                    assert len(columns) == 10
                    sentences[-1].append(columns[1])
            else:
                in_sentence = False
    print("Loaded CoNLL-U file with {} sentences and {} words.".format(len(sentences), sum(map(len, sentences))), file=sys.stderr)

    # Compute contextual embeddings sentence by sentence and store each
    # sentence's matrix as a separate array in a NPZ-compatible zip file.
    bert = bert_wrapper.BertWrapper(language=args.language, size=args.size, casing=args.casing, layer_indices=args.layer_indices,
                             with_cls=args.with_cls, threads=args.threads, batch_size=args.batch_size)
    with zipfile.ZipFile(args.output_npz, mode="w", compression=zipfile.ZIP_STORED) as output_npz:
        for i, embeddings in enumerate(bert.bert_embeddings(sentences)):
            if (i + 1) % 100 == 0: print("Processed {}/{} sentences.".format(i + 1, len(sentences)), file=sys.stderr)
            with output_npz.open("arr_{}".format(i), mode="w") as embeddings_file:
                np.save(embeddings_file, embeddings)
    print("Done, all embeddings saved.", file=sys.stderr)
| [
"zipfile.ZipFile",
"argparse.ArgumentParser",
"bert_wrapper.BertWrapper",
"re.match",
"numpy.save"
] | [((157, 182), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (180, 182), False, 'import argparse\n'), ((1946, 2147), 'bert_wrapper.BertWrapper', 'bert_wrapper.BertWrapper', ([], {'language': 'args.language', 'size': 'args.size', 'casing': 'args.casing', 'layer_indices': 'args.layer_indices', 'with_cls': 'args.with_cls', 'threads': 'args.threads', 'batch_size': 'args.batch_size'}), '(language=args.language, size=args.size, casing=\n args.casing, layer_indices=args.layer_indices, with_cls=args.with_cls,\n threads=args.threads, batch_size=args.batch_size)\n', (1970, 2147), False, 'import bert_wrapper\n'), ((2184, 2258), 'zipfile.ZipFile', 'zipfile.ZipFile', (['args.output_npz'], {'mode': '"""w"""', 'compression': 'zipfile.ZIP_STORED'}), "(args.output_npz, mode='w', compression=zipfile.ZIP_STORED)\n", (2199, 2258), False, 'import zipfile\n'), ((1527, 1555), 're.match', 're.match', (['"""^[0-9]*\\\\t"""', 'line'], {}), "('^[0-9]*\\\\t', line)\n", (1535, 1555), False, 'import re\n'), ((2564, 2600), 'numpy.save', 'np.save', (['embeddings_file', 'embeddings'], {}), '(embeddings_file, embeddings)\n', (2571, 2600), True, 'import numpy as np\n')] |
from django.conf.urls import url
from django.contrib import admin
from blog.views import *
# Top-level URL routes: the Django admin plus the public pages imported
# (via wildcard) from blog.views.
urlpatterns = [
    url(r'^admin/', admin.site.urls),  # Django admin site
    url(r'^$', home),                  # landing page
    url(r'^about/', about),            # about page
    url(r'^kontak/', kontak),          # contact ("kontak") page
    url(r'^blog/', blog),              # blog listing
]
| [
"django.conf.urls.url"
] | [((119, 150), 'django.conf.urls.url', 'url', (['"""^admin/"""', 'admin.site.urls'], {}), "('^admin/', admin.site.urls)\n", (122, 150), False, 'from django.conf.urls import url\n'), ((158, 173), 'django.conf.urls.url', 'url', (['"""^$"""', 'home'], {}), "('^$', home)\n", (161, 173), False, 'from django.conf.urls import url\n'), ((181, 202), 'django.conf.urls.url', 'url', (['"""^about/"""', 'about'], {}), "('^about/', about)\n", (184, 202), False, 'from django.conf.urls import url\n'), ((210, 233), 'django.conf.urls.url', 'url', (['"""^kontak/"""', 'kontak'], {}), "('^kontak/', kontak)\n", (213, 233), False, 'from django.conf.urls import url\n'), ((241, 260), 'django.conf.urls.url', 'url', (['"""^blog/"""', 'blog'], {}), "('^blog/', blog)\n", (244, 260), False, 'from django.conf.urls import url\n')] |
import sys, math, numpy, struct
import matplotlib.pyplot as plt
class readBinaryModels(object):
    '''Reader for binary model files.

    File layout: a sequence of models, each consisting of a header packed
    as (int, double, double, int, int) followed by head[3] rows of
    head[4] little-endian doubles.
    '''

    def __init__(self, fil):
        '''Open the binary file ``fil`` for reading.'''
        super(readBinaryModels, self).__init__()
        self.fread = open(fil, "rb")
        self.head = None
        self.model = None

    def close(self):
        '''Close the underlying file.'''
        self.fread.close()

    def __readHeader(self):
        '''Read and return the next model header, or None at end of file.'''
        head = []
        byte = self.fread.read(4)
        if len(byte) == 0:
            return None
        head.append(*struct.unpack('i', byte))
        head.append(*struct.unpack('d', self.fread.read(8)))
        head.append(*struct.unpack('d', self.fread.read(8)))
        head.append(*struct.unpack('i', self.fread.read(4)))
        head.append(*struct.unpack('i', self.fread.read(4)))
        return head

    def nextModel(self):
        '''Read the next model into self.head and self.model.

        Returns True on success, False once end of file is reached.
        '''
        self.head = self.__readHeader()
        if self.head is None:
            return False
        self.model = []
        for ii in range(self.head[3]):
            s = []
            for jj in range(self.head[4]):
                s.append(*struct.unpack('d', self.fread.read(8)))
            self.model.append(s)
        return True

    def readOnlyHeader(self):
        '''Read the next header into self.head and skip the model payload.

        Returns True on success, False at end of file.
        '''
        self.head = self.__readHeader()
        if self.head is None:
            return False
        # Skip the model payload. Bug fix: the original referenced the
        # undefined name ``head`` here (instead of ``self.head``), so
        # every call raised NameError.
        for ii in range(self.head[3]):
            for jj in range(self.head[4]):
                self.fread.read(8)
        return True
def main():
    '''Get evolution of one element in epsilon or [X/Fe]

    Usage: <prog> <(eps|xfe|massf)> <model-file> <elem1> [elem2, ...]

    Reads species and solar-abundance tables from ../../data, walks every
    model in the binary file, extracts surface abundances at the first
    mass coordinate >= 0.85, and plots either log-epsilon, [X/Fe], or
    mass fraction of the requested elements against TP-AGB time.
    Returns 0 on success, 1 on bad usage.
    '''
    # Check arguments
    if len(sys.argv) < 4:
        print("Usage python {} <(eps|xfe|massf)> <model>".format(sys.argv[0]), end = " ")
        print("<elem1> [elem2, elem3, ...]")
        return 1
    # NOTE(review): ``data`` is assigned but never used below.
    data = "../../data/species.dat"
    mode = sys.argv[1]
    archivo = sys.argv[2]
    elms = sys.argv[3:]
    # Presumably the solar hydrogen mass fraction -- TODO confirm.
    solarH = 0.7381
    # Read "species.dat" and store all the values in lists
    species = "../../data/species.dat"
    atomicNum = []; atomicMass = []; namesZ = {}
    with open(species, "r") as fread:
        for line in fread:
            lnlst = line.split()
            # Correct special names
            if lnlst[1] == "d" or lnlst[2] == "0":
                lnlst[1] = "h"
            # Now relate positions with atomic numbers, atomic masses, and names
            zNum = int(lnlst[0]) - int(lnlst[2])
            atomicNum.append(zNum)
            atomicMass.append(int(lnlst[0]))
            namesZ[lnlst[1]] = zNum
    # Read all initial solar values
    solar = "../../data/solarVals.dat"
    solarValues = {}
    with open(solar, "r") as fread:
        for line in fread:
            lnlst = line.split()
            # NOTE(review): ``isotName`` is built but never used.
            isotName = lnlst[0] + lnlst[2]
            # Add mass fraction value per atomic number
            key = namesZ[lnlst[0]]; val = float(lnlst[1])*float(lnlst[2])
            solarValues[key] = solarValues.get(key, 0) + val
    # Now go model by model, calculating everything for every element
    modelObj = readBinaryModels(archivo)
    # Each line has mass, temperature, rho, radiat
    # and elements in number fraction
    ages = []; evolEps = []; evolXFe = []; evolMassF = []
    while True:
        isNewModel = modelObj.nextModel()
        if not isNewModel:
            break
        header = modelObj.head
        model = modelObj.model
        # Get the age (header[2] is log age; the -3 converts to ky --
        # presumably, TODO confirm the units).
        age = 10**(header[2] - 3)
        # First entry keeps the absolute age; later entries are offsets
        # from it. ages[0] itself is zeroed after the loop.
        if len(ages) == 0:
            ages.append(age)
        else:
            ages.append(age - ages[0])
        # Report some progress
        print(len(ages))
        # Find the surface for this model
        for ii in range(1, len(model)):
            mass = (model[ii - 1][0] + model[ii][0])*0.5
            # If found surface, extract information
            if mass >= 0.85:
                prevLine = model[ii - 1]
                newLine = model[ii]
                # Take all abundances (average of the two cells straddling
                # the surface; columns 4+ hold the species).
                dens = [(x + y)*0.5 for (x, y) in zip(prevLine[4:], newLine[4:])]
                epsVals = {}; xFeVals = {}; mFVals = {}
                # Add the values for each element.
                # NOTE(review): this inner loop reuses (shadows) the outer
                # loop variable ``ii``; harmless only because of the
                # unconditional break below.
                for ii in range(len(atomicNum)):
                    key = atomicNum[ii]
                    epsVals[key] = epsVals.get(key, 0) + dens[ii]
                    mFVals[key] = mFVals.get(key, 0) + dens[ii]*atomicMass[ii]
                    xFeVals[key] = mFVals[key]
                # Now calculate values of interest
                feVal = xFeVals[namesZ["fe"]]
                sunFeVal = solarValues[namesZ["fe"]]
                selectedEps = []; selectedFe = []; selectedMassF = []
                # NOTE(review): in each try below, a KeyError only prints a
                # message; ``val`` then silently keeps its previous value
                # when the following lines run.
                for elem in elms:
                    try:
                        val = epsVals[namesZ[elem]]/solarH + 1e-100
                    except KeyError:
                        print("{} is not on the list".format(elem))
                    except:
                        raise
                    val = math.log10(val) + 12
                    selectedEps.append(val)
                    try:
                        val = xFeVals[namesZ[elem]]/feVal + 1e-100
                    except KeyError:
                        print("{} is not on the list".format(elem))
                    except:
                        raise
                    sunVal = solarValues.get(namesZ[elem], 1e-100)/sunFeVal
                    val = math.log10(val) - math.log10(sunVal)
                    selectedFe.append(val)
                    try:
                        val = mFVals[namesZ[elem]] + 1e-100
                    except KeyError:
                        print("{} is not on the list".format(elem))
                    except:
                        raise
                    selectedMassF.append(val)
                break
        evolEps.append(selectedEps)
        evolXFe.append(selectedFe)
        evolMassF.append(selectedMassF)
    # Transform age and evol values to something plottable:
    # transpose so each row tracks one element across all models.
    ages[0] = 0
    evolEps = numpy.transpose(numpy.array(evolEps))
    evolXFe = numpy.transpose(numpy.array(evolXFe))
    evolMassF = numpy.transpose(numpy.array(evolMassF))
    # Now plot values for the requested mode.
    if mode == "eps":
        for ii in range(len(elms)):
            evEps = evolEps[ii]
            minLen = min(len(ages), len(evEps))
            plt.plot(ages[0:minLen], evEps[0:minLen], label = elms[ii], lw = 2)
        plt.xlabel("TP-AGB time in ky")
        plt.ylabel("Log epsilon")
        #plt.ylim([-2, 5])
        plt.ylim([0, 5])
    elif mode == "xfe":
        for ii in range(len(elms)):
            evXFe = evolXFe[ii]
            minLen = min(len(ages), len(evXFe))
            plt.plot(ages[0:minLen], evXFe[0:minLen], label = elms[ii], lw = 2)
        plt.xlabel("TP-AGB time in ky")
        plt.ylabel("[X/Fe]")
        plt.ylim([-0.2, 2])
    elif mode == "massf":
        for ii in range(len(elms)):
            evMassF = evolMassF[ii]
            minLen = min(len(ages), len(evMassF))
            plt.plot(ages[0:minLen], evMassF[0:minLen], label = elms[ii], lw = 2)
        plt.xlabel("TP-AGB time in ky")
        plt.ylabel("Mass fraction")
        plt.yscale("log")
    #plt.legend(loc = 0)
    plt.show()
    return 0
# Script entry point; main() returns an exit status, which is ignored here.
if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"struct.unpack",
"matplotlib.pyplot.ylim",
"math.log10",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.show"
] | [((7333, 7343), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7341, 7343), True, 'import matplotlib.pyplot as plt\n'), ((6152, 6172), 'numpy.array', 'numpy.array', (['evolEps'], {}), '(evolEps)\n', (6163, 6172), False, 'import sys, math, numpy, struct\n'), ((6204, 6224), 'numpy.array', 'numpy.array', (['evolXFe'], {}), '(evolXFe)\n', (6215, 6224), False, 'import sys, math, numpy, struct\n'), ((6258, 6280), 'numpy.array', 'numpy.array', (['evolMassF'], {}), '(evolMassF)\n', (6269, 6280), False, 'import sys, math, numpy, struct\n'), ((6532, 6563), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""TP-AGB time in ky"""'], {}), "('TP-AGB time in ky')\n", (6542, 6563), True, 'import matplotlib.pyplot as plt\n'), ((6572, 6597), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Log epsilon"""'], {}), "('Log epsilon')\n", (6582, 6597), True, 'import matplotlib.pyplot as plt\n'), ((6633, 6649), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 5]'], {}), '([0, 5])\n', (6641, 6649), True, 'import matplotlib.pyplot as plt\n'), ((6455, 6518), 'matplotlib.pyplot.plot', 'plt.plot', (['ages[0:minLen]', 'evEps[0:minLen]'], {'label': 'elms[ii]', 'lw': '(2)'}), '(ages[0:minLen], evEps[0:minLen], label=elms[ii], lw=2)\n', (6463, 6518), True, 'import matplotlib.pyplot as plt\n'), ((6880, 6911), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""TP-AGB time in ky"""'], {}), "('TP-AGB time in ky')\n", (6890, 6911), True, 'import matplotlib.pyplot as plt\n'), ((6920, 6940), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""[X/Fe]"""'], {}), "('[X/Fe]')\n", (6930, 6940), True, 'import matplotlib.pyplot as plt\n'), ((6949, 6968), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.2, 2]'], {}), '([-0.2, 2])\n', (6957, 6968), True, 'import matplotlib.pyplot as plt\n'), ((590, 614), 'struct.unpack', 'struct.unpack', (['"""i"""', 'byte'], {}), "('i', byte)\n", (603, 614), False, 'import sys, math, numpy, struct\n'), ((6803, 6866), 'matplotlib.pyplot.plot', 'plt.plot', (['ages[0:minLen]', 
'evXFe[0:minLen]'], {'label': 'elms[ii]', 'lw': '(2)'}), '(ages[0:minLen], evXFe[0:minLen], label=elms[ii], lw=2)\n', (6811, 6866), True, 'import matplotlib.pyplot as plt\n'), ((7209, 7240), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""TP-AGB time in ky"""'], {}), "('TP-AGB time in ky')\n", (7219, 7240), True, 'import matplotlib.pyplot as plt\n'), ((7249, 7276), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mass fraction"""'], {}), "('Mass fraction')\n", (7259, 7276), True, 'import matplotlib.pyplot as plt\n'), ((7285, 7302), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (7295, 7302), True, 'import matplotlib.pyplot as plt\n'), ((7130, 7195), 'matplotlib.pyplot.plot', 'plt.plot', (['ages[0:minLen]', 'evMassF[0:minLen]'], {'label': 'elms[ii]', 'lw': '(2)'}), '(ages[0:minLen], evMassF[0:minLen], label=elms[ii], lw=2)\n', (7138, 7195), True, 'import matplotlib.pyplot as plt\n'), ((5113, 5128), 'math.log10', 'math.log10', (['val'], {}), '(val)\n', (5123, 5128), False, 'import sys, math, numpy, struct\n'), ((5536, 5551), 'math.log10', 'math.log10', (['val'], {}), '(val)\n', (5546, 5551), False, 'import sys, math, numpy, struct\n'), ((5554, 5572), 'math.log10', 'math.log10', (['sunVal'], {}), '(sunVal)\n', (5564, 5572), False, 'import sys, math, numpy, struct\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""This config file is for running experiments for the EMNLP camera ready.
It will generate the following experiments (depending on the value of eval_split and model):
- 200 dev examples
- GPT-3 Constrained Canonical, P = 20
- GPT-3 Constrained Meaning, P = 20
- GPT-3 Unconstrained Canonical, P = 20
- GPT-3 Unconstrained Meaning, P = 20
- GPT-3 Constrained Canonical, P = 8
- GPT-3 Constrained Meaning, P = 8
- All dev examples
- GPT-3 Constrained Canonical, P = 20
- BART Constrained Canonical
- BART Constrained Meaning
- BART Unconstrained Canonical
- BART Unconstrained Meaning
- GPT-2 Constrained Canonical
- GPT-2 Constrained Meaning
- GPT-2 Unconstrained Canonical
- GPT-2 Unconstrained Meaning
"""
from typing import Any, Callable, Dict
import torch
from typing_extensions import Literal
from semantic_parsing_with_constrained_lm.scfg.read_grammar import PreprocessedGrammar
from semantic_parsing_with_constrained_lm.scfg.scfg import SCFG
from semantic_parsing_with_constrained_lm.configs.lib.calflow import (
cached_read_calflow_jsonl,
make_semantic_parser_for_calflow,
)
from semantic_parsing_with_constrained_lm.configs.lib.common import PromptOrder
from semantic_parsing_with_constrained_lm.domains.calflow import CalflowMetrics, CalflowOutputLanguage
from semantic_parsing_with_constrained_lm.eval import TopKExactMatch
from semantic_parsing_with_constrained_lm.lm import TRAINED_MODEL_DIR, AutoregressiveModel, ClientType
from semantic_parsing_with_constrained_lm.lm_bart import Seq2SeqBart
from semantic_parsing_with_constrained_lm.lm_openai_gpt3 import IncrementalOpenAIGPT3
from semantic_parsing_with_constrained_lm.paths import DOMAINS_DIR
from semantic_parsing_with_constrained_lm.run_exp import EvalSplit, Experiment
def build_config(
    log_dir, # pylint: disable=unused-argument
    eval_split: EvalSplit,
    model: ClientType,
    **kwargs: Any, # pylint: disable=unused-argument
) -> Dict[str, Callable[[], Experiment]]:
    """Build the CalFlow experiment dictionary for one eval split and model.

    Returns a mapping from experiment name to a zero-argument factory that
    builds the Experiment lazily, so expensive setup (model loading, data
    reading) only happens for experiments that are actually run.
    """
    EXAMPLES_DIR = DOMAINS_DIR / "calflow/data"
    TRAIN_SIZE = 300
    BEAM_SIZE = 10
    use_gpt3 = model == ClientType.GPT3

    preprocessed_grammar = PreprocessedGrammar.from_folder(
        str(DOMAINS_DIR / "calflow/grammar")
    )
    scfg = SCFG(preprocessed_grammar)

    def create_exp(
        problem_type: Literal[
            "constrained", "unconstrained-beam", "unconstrained-greedy"
        ],
        output_type: CalflowOutputLanguage,
    ):
        # Build one Experiment: pick test data for the split, build the
        # language model for `model`, and wrap both in a semantic parser.
        train_data = cached_read_calflow_jsonl(
            EXAMPLES_DIR / "train_300_stratified.jsonl", output_type,
        )[:TRAIN_SIZE]
        if eval_split == EvalSplit.DevFull:
            test_data = cached_read_calflow_jsonl(
                EXAMPLES_DIR / "dev_all.jsonl", output_type,
            )
        elif eval_split == EvalSplit.DevSubset:
            test_data = cached_read_calflow_jsonl(
                EXAMPLES_DIR / "test_200_uniform.jsonl", output_type,
            )
        elif eval_split == EvalSplit.TrainSubset:
            # Select a subset not already present in train
            ids_train_300 = set()
            with open(EXAMPLES_DIR / "ids_train_300_stratified.txt", "r") as id_file:
                # Idiom fix: the original used
                # `for _, line in enumerate(id_file)` and discarded the index.
                for line in id_file:
                    dialogue_id, turn_index = line.strip().split(",")
                    ids_train_300.add((dialogue_id.strip(), int(turn_index.strip())))
            train_data_1000_stratified = cached_read_calflow_jsonl(
                EXAMPLES_DIR / "train_1000_stratified.jsonl", output_type,
            )
            test_data = [
                datum
                for datum in train_data_1000_stratified
                if (datum.dialogue_id, datum.turn_part_index) not in ids_train_300
            ]
            test_data = test_data[:100]
        else:
            raise ValueError(eval_split)

        lm: AutoregressiveModel
        if model == ClientType.GPT3:
            lm = IncrementalOpenAIGPT3()
        elif model == ClientType.BART:
            lm = Seq2SeqBart(
                # Part after / is set to match lm_finetune.py
                f"{TRAINED_MODEL_DIR}/20000/calflow_{output_type}/",
                device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
            )
        else:
            raise ValueError(model)

        if problem_type == "constrained":
            constrained = True
            beam_size = BEAM_SIZE
        elif problem_type == "unconstrained-beam":
            constrained = False
            beam_size = BEAM_SIZE
        elif problem_type == "unconstrained-greedy":
            # Unconstrained greedy decoding is beam search with width 1.
            constrained = False
            beam_size = 1
        else:
            raise ValueError(f"{problem_type} not allowed")

        parser = make_semantic_parser_for_calflow(
            train_data,
            lm,
            use_gpt3,
            beam_size,
            output_type,
            model,
            preprocessed_grammar,
            constrained,
            prompt_order=PromptOrder.Shuffle,
        )

        return Experiment(
            model=parser,
            metrics={
                "exact_match": TopKExactMatch(beam_size),
                "round_trip": CalflowMetrics(
                    k=beam_size,
                    scfg=scfg,
                    data_type=output_type,
                    require_exact_length=True,
                ),
            },
            test_data=test_data,
            client=lm,
        )

    def add_exp_to_dict(
        exps_dict: Dict[str, Callable[[], Experiment]],
        problem_type: Literal[
            "constrained", "unconstrained-beam", "unconstrained-greedy"
        ],
        output_type: CalflowOutputLanguage,
        num_examples_per_prompt: int,
    ):
        # Register a lazily-built experiment. num_examples_per_prompt only
        # affects the experiment name, not the construction itself.
        exp_name = f"calflow_{model}_{eval_split}_{problem_type}_{output_type}_prompt{num_examples_per_prompt}"
        exps_dict[exp_name] = lambda: create_exp(problem_type, output_type)

    result: Dict[str, Callable[[], Experiment]] = {}
    if eval_split == EvalSplit.DevFull:
        if use_gpt3:
            add_exp_to_dict(result, "constrained", CalflowOutputLanguage.Canonical, 20)
        else:
            add_exp_to_dict(result, "constrained", CalflowOutputLanguage.Canonical, 0)
            add_exp_to_dict(result, "constrained", CalflowOutputLanguage.Lispress, 0)
            add_exp_to_dict(
                result, "unconstrained-greedy", CalflowOutputLanguage.Canonical, 0
            )
            add_exp_to_dict(
                result, "unconstrained-greedy", CalflowOutputLanguage.Lispress, 0
            )
    elif eval_split == EvalSplit.DevSubset:
        if use_gpt3:
            add_exp_to_dict(result, "constrained", CalflowOutputLanguage.Canonical, 20)
            add_exp_to_dict(result, "constrained", CalflowOutputLanguage.Lispress, 20)
            add_exp_to_dict(
                result, "unconstrained-greedy", CalflowOutputLanguage.Canonical, 20
            )
            add_exp_to_dict(
                result, "unconstrained-greedy", CalflowOutputLanguage.Lispress, 20
            )
        else:
            add_exp_to_dict(result, "constrained", CalflowOutputLanguage.Canonical, 0)
            add_exp_to_dict(result, "constrained", CalflowOutputLanguage.Lispress, 0)
    elif eval_split == EvalSplit.TrainSubset and not use_gpt3:
        add_exp_to_dict(result, "constrained", CalflowOutputLanguage.Canonical, 0)
        add_exp_to_dict(result, "constrained", CalflowOutputLanguage.Lispress, 0)
    return result
| [
"semantic_parsing_with_constrained_lm.domains.calflow.CalflowMetrics",
"semantic_parsing_with_constrained_lm.configs.lib.calflow.cached_read_calflow_jsonl",
"semantic_parsing_with_constrained_lm.eval.TopKExactMatch",
"torch.cuda.is_available",
"semantic_parsing_with_constrained_lm.lm_openai_gpt3.Incremental... | [((2305, 2331), 'semantic_parsing_with_constrained_lm.scfg.scfg.SCFG', 'SCFG', (['preprocessed_grammar'], {}), '(preprocessed_grammar)\n', (2309, 2331), False, 'from semantic_parsing_with_constrained_lm.scfg.scfg import SCFG\n'), ((4792, 4959), 'semantic_parsing_with_constrained_lm.configs.lib.calflow.make_semantic_parser_for_calflow', 'make_semantic_parser_for_calflow', (['train_data', 'lm', 'use_gpt3', 'beam_size', 'output_type', 'model', 'preprocessed_grammar', 'constrained'], {'prompt_order': 'PromptOrder.Shuffle'}), '(train_data, lm, use_gpt3, beam_size,\n output_type, model, preprocessed_grammar, constrained, prompt_order=\n PromptOrder.Shuffle)\n', (4824, 4959), False, 'from semantic_parsing_with_constrained_lm.configs.lib.calflow import cached_read_calflow_jsonl, make_semantic_parser_for_calflow\n'), ((2539, 2626), 'semantic_parsing_with_constrained_lm.configs.lib.calflow.cached_read_calflow_jsonl', 'cached_read_calflow_jsonl', (["(EXAMPLES_DIR / 'train_300_stratified.jsonl')", 'output_type'], {}), "(EXAMPLES_DIR / 'train_300_stratified.jsonl',\n output_type)\n", (2564, 2626), False, 'from semantic_parsing_with_constrained_lm.configs.lib.calflow import cached_read_calflow_jsonl, make_semantic_parser_for_calflow\n'), ((2728, 2798), 'semantic_parsing_with_constrained_lm.configs.lib.calflow.cached_read_calflow_jsonl', 'cached_read_calflow_jsonl', (["(EXAMPLES_DIR / 'dev_all.jsonl')", 'output_type'], {}), "(EXAMPLES_DIR / 'dev_all.jsonl', output_type)\n", (2753, 2798), False, 'from semantic_parsing_with_constrained_lm.configs.lib.calflow import cached_read_calflow_jsonl, make_semantic_parser_for_calflow\n'), ((3989, 4012), 'semantic_parsing_with_constrained_lm.lm_openai_gpt3.IncrementalOpenAIGPT3', 'IncrementalOpenAIGPT3', ([], {}), '()\n', (4010, 4012), False, 'from semantic_parsing_with_constrained_lm.lm_openai_gpt3 import IncrementalOpenAIGPT3\n'), ((2902, 2981), 
'semantic_parsing_with_constrained_lm.configs.lib.calflow.cached_read_calflow_jsonl', 'cached_read_calflow_jsonl', (["(EXAMPLES_DIR / 'test_200_uniform.jsonl')", 'output_type'], {}), "(EXAMPLES_DIR / 'test_200_uniform.jsonl', output_type)\n", (2927, 2981), False, 'from semantic_parsing_with_constrained_lm.configs.lib.calflow import cached_read_calflow_jsonl, make_semantic_parser_for_calflow\n'), ((3490, 3578), 'semantic_parsing_with_constrained_lm.configs.lib.calflow.cached_read_calflow_jsonl', 'cached_read_calflow_jsonl', (["(EXAMPLES_DIR / 'train_1000_stratified.jsonl')", 'output_type'], {}), "(EXAMPLES_DIR / 'train_1000_stratified.jsonl',\n output_type)\n", (3515, 3578), False, 'from semantic_parsing_with_constrained_lm.configs.lib.calflow import cached_read_calflow_jsonl, make_semantic_parser_for_calflow\n'), ((5177, 5202), 'semantic_parsing_with_constrained_lm.eval.TopKExactMatch', 'TopKExactMatch', (['beam_size'], {}), '(beam_size)\n', (5191, 5202), False, 'from semantic_parsing_with_constrained_lm.eval import TopKExactMatch\n'), ((5234, 5326), 'semantic_parsing_with_constrained_lm.domains.calflow.CalflowMetrics', 'CalflowMetrics', ([], {'k': 'beam_size', 'scfg': 'scfg', 'data_type': 'output_type', 'require_exact_length': '(True)'}), '(k=beam_size, scfg=scfg, data_type=output_type,\n require_exact_length=True)\n', (5248, 5326), False, 'from semantic_parsing_with_constrained_lm.domains.calflow import CalflowMetrics, CalflowOutputLanguage\n'), ((4261, 4286), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4284, 4286), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\conta\Documents\script\Wizard\App\gui\ui_files\chat.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """pyuic5-generated layout for a chat widget: a title bar, a scrollable
    message area with a "Show more" button, and a bottom input row with
    attach-file and send buttons. Do not hand-edit the widget construction
    order - regenerate from chat.ui instead (see the header warning)."""
    def setupUi(self, Form):
        """Build the widget tree and layouts on the given top-level Form."""
        Form.setObjectName("Form")
        Form.resize(428, 544)
        # Root layout: a single horizontal box holding chat_frame edge-to-edge.
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(Form)
        self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout_2.setSpacing(0)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.chat_frame = QtWidgets.QFrame(Form)
        self.chat_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.chat_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.chat_frame.setObjectName("chat_frame")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.chat_frame)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setObjectName("verticalLayout")
        # --- Top bar: project label pushed left by an expanding spacer.
        self.chat_top_horizontalFrame = QtWidgets.QFrame(self.chat_frame)
        self.chat_top_horizontalFrame.setObjectName("chat_top_horizontalFrame")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.chat_top_horizontalFrame)
        self.horizontalLayout.setContentsMargins(10, 10, 10, 10)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.chat_project_label = QtWidgets.QLabel(self.chat_top_horizontalFrame)
        self.chat_project_label.setObjectName("chat_project_label")
        self.horizontalLayout.addWidget(self.chat_project_label)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.verticalLayout.addWidget(self.chat_top_horizontalFrame)
        # --- Message area: vertical scroll area that grows with content.
        self.messages_frame = QtWidgets.QFrame(self.chat_frame)
        self.messages_frame.setObjectName("messages_frame")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.messages_frame)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setSpacing(0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.chat_main_scrollArea = QtWidgets.QScrollArea(self.messages_frame)
        self.chat_main_scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.chat_main_scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContentsOnFirstShow)
        self.chat_main_scrollArea.setWidgetResizable(True)
        self.chat_main_scrollArea.setObjectName("chat_main_scrollArea")
        self.chat_scrollAreaWidgetContents = QtWidgets.QWidget()
        self.chat_scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 424, 397))
        self.chat_scrollAreaWidgetContents.setObjectName("chat_scrollAreaWidgetContents")
        self.chat_messages_layouts = QtWidgets.QVBoxLayout(self.chat_scrollAreaWidgetContents)
        self.chat_messages_layouts.setContentsMargins(-1, 0, -1, 0)
        self.chat_messages_layouts.setObjectName("chat_messages_layouts")
        # Expanding spacer keeps messages bottom-aligned inside the scroll area.
        spacerItem1 = QtWidgets.QSpacerItem(20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.chat_messages_layouts.addItem(spacerItem1)
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setContentsMargins(-1, 11, -1, 11)
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.chat_show_more_pushButton = QtWidgets.QPushButton(self.chat_scrollAreaWidgetContents)
        self.chat_show_more_pushButton.setMinimumSize(QtCore.QSize(100, 40))
        self.chat_show_more_pushButton.setMaximumSize(QtCore.QSize(100, 40))
        self.chat_show_more_pushButton.setObjectName("chat_show_more_pushButton")
        self.horizontalLayout_3.addWidget(self.chat_show_more_pushButton)
        self.chat_messages_layouts.addLayout(self.horizontalLayout_3)
        # chat_messages_layouts_1 is the slot where message widgets get inserted.
        self.chat_messages_layouts_1 = QtWidgets.QVBoxLayout()
        self.chat_messages_layouts_1.setSpacing(0)
        self.chat_messages_layouts_1.setObjectName("chat_messages_layouts_1")
        self.chat_messages_layouts.addLayout(self.chat_messages_layouts_1)
        spacerItem2 = QtWidgets.QSpacerItem(20, 11, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        self.chat_messages_layouts.addItem(spacerItem2)
        self.chat_main_scrollArea.setWidget(self.chat_scrollAreaWidgetContents)
        self.verticalLayout_2.addWidget(self.chat_main_scrollArea)
        self.verticalLayout.addWidget(self.messages_frame)
        # --- 1px separator line between the message area and the input row.
        self.line_2 = QtWidgets.QFrame(self.chat_frame)
        self.line_2.setMaximumSize(QtCore.QSize(16777215, 1))
        self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_2.setObjectName("line_2")
        self.verticalLayout.addWidget(self.line_2)
        # --- Bottom input section: attached-file strip plus action buttons.
        self.verticalLayout_3 = QtWidgets.QVBoxLayout()
        self.verticalLayout_3.setContentsMargins(10, -1, 10, -1)
        self.verticalLayout_3.setSpacing(0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.chat_file_frame = QtWidgets.QFrame(self.chat_frame)
        self.chat_file_frame.setObjectName("chat_file_frame")
        self.chat_file_layout = QtWidgets.QHBoxLayout(self.chat_file_frame)
        self.chat_file_layout.setObjectName("chat_file_layout")
        self.added_chat_file_pushButton = QtWidgets.QPushButton(self.chat_file_frame)
        self.added_chat_file_pushButton.setMinimumSize(QtCore.QSize(0, 40))
        self.added_chat_file_pushButton.setText("")
        self.added_chat_file_pushButton.setObjectName("added_chat_file_pushButton")
        self.chat_file_layout.addWidget(self.added_chat_file_pushButton)
        spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.chat_file_layout.addItem(spacerItem3)
        self.verticalLayout_3.addWidget(self.chat_file_frame)
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_4.setContentsMargins(11, -1, 0, -1)
        self.horizontalLayout_4.setSpacing(3)
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        # Icon-only 36x36 buttons; icons presumably applied elsewhere at runtime.
        self.add_file_pushButton = QtWidgets.QPushButton(self.chat_frame)
        self.add_file_pushButton.setMinimumSize(QtCore.QSize(36, 36))
        self.add_file_pushButton.setMaximumSize(QtCore.QSize(36, 36))
        self.add_file_pushButton.setText("")
        self.add_file_pushButton.setObjectName("add_file_pushButton")
        self.horizontalLayout_4.addWidget(self.add_file_pushButton)
        self.send_message_pushButton = QtWidgets.QPushButton(self.chat_frame)
        self.send_message_pushButton.setMinimumSize(QtCore.QSize(36, 36))
        self.send_message_pushButton.setMaximumSize(QtCore.QSize(36, 36))
        self.send_message_pushButton.setText("")
        self.send_message_pushButton.setObjectName("send_message_pushButton")
        self.horizontalLayout_4.addWidget(self.send_message_pushButton)
        self.verticalLayout_3.addLayout(self.horizontalLayout_4)
        self.verticalLayout.addLayout(self.verticalLayout_3)
        self.horizontalLayout_2.addWidget(self.chat_frame)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Install translatable user-visible strings on the built widgets."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.chat_project_label.setText(_translate("Form", "TextLabel"))
        self.chat_show_more_pushButton.setText(_translate("Form", "Show more"))
if __name__ == "__main__":
    # Standalone preview: instantiate the form inside its own QApplication
    # so the generated layout can be inspected without the host application.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Form = QtWidgets.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    sys.exit(app.exec_())
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QSpacerItem",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QFrame",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QPushBut... | [((7788, 7820), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (7810, 7820), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7832, 7851), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (7849, 7851), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((446, 473), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['Form'], {}), '(Form)\n', (467, 473), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((677, 699), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['Form'], {}), '(Form)\n', (693, 699), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((914, 952), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.chat_frame'], {}), '(self.chat_frame)\n', (935, 952), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1154, 1187), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['self.chat_frame'], {}), '(self.chat_frame)\n', (1170, 1187), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1300, 1352), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self.chat_top_horizontalFrame'], {}), '(self.chat_top_horizontalFrame)\n', (1321, 1352), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1516, 1563), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.chat_top_horizontalFrame'], {}), '(self.chat_top_horizontalFrame)\n', (1532, 1563), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1718, 1816), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(40)', '(20)', 'QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Minimum'], {}), '(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Minimum)\n', (1739, 1816), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1961, 1994), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['self.chat_frame'], {}), '(self.chat_frame)\n', (1977, 1994), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2087, 2129), 
'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.messages_frame'], {}), '(self.messages_frame)\n', (2108, 2129), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2335, 2377), 'PyQt5.QtWidgets.QScrollArea', 'QtWidgets.QScrollArea', (['self.messages_frame'], {}), '(self.messages_frame)\n', (2356, 2377), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2760, 2779), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (2777, 2779), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2992, 3049), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.chat_scrollAreaWidgetContents'], {}), '(self.chat_scrollAreaWidgetContents)\n', (3013, 3049), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3214, 3311), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(20)', '(0)', 'QtWidgets.QSizePolicy.Minimum', 'QtWidgets.QSizePolicy.Expanding'], {}), '(20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.\n QSizePolicy.Expanding)\n', (3235, 3311), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3397, 3420), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (3418, 3420), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3597, 3654), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.chat_scrollAreaWidgetContents'], {}), '(self.chat_scrollAreaWidgetContents)\n', (3618, 3654), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4074, 4097), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (4095, 4097), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4324, 4418), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(20)', '(11)', 'QtWidgets.QSizePolicy.Minimum', 'QtWidgets.QSizePolicy.Fixed'], {}), '(20, 11, QtWidgets.QSizePolicy.Minimum, QtWidgets.\n QSizePolicy.Fixed)\n', (4345, 4418), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4698, 4731), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', 
(['self.chat_frame'], {}), '(self.chat_frame)\n', (4714, 4731), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5039, 5062), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (5060, 5062), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5267, 5300), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['self.chat_frame'], {}), '(self.chat_frame)\n', (5283, 5300), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5395, 5438), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self.chat_file_frame'], {}), '(self.chat_file_frame)\n', (5416, 5438), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5545, 5588), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.chat_file_frame'], {}), '(self.chat_file_frame)\n', (5566, 5588), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5896, 5994), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(40)', '(20)', 'QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Minimum'], {}), '(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Minimum)\n', (5917, 5994), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6137, 6160), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (6158, 6160), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6376, 6414), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.chat_frame'], {}), '(self.chat_frame)\n', (6397, 6414), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6777, 6815), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.chat_frame'], {}), '(self.chat_frame)\n', (6798, 6815), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7390, 7433), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Form'], {}), '(Form)\n', (7427, 7433), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2835, 2863), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(424)', 
'(397)'], {}), '(0, 0, 424, 397)\n', (2847, 2863), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3709, 3730), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(100)', '(40)'], {}), '(100, 40)\n', (3721, 3730), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3786, 3807), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(100)', '(40)'], {}), '(100, 40)\n', (3798, 3807), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4767, 4792), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(16777215)', '(1)'], {}), '(16777215, 1)\n', (4779, 4792), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5644, 5663), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(0)', '(40)'], {}), '(0, 40)\n', (5656, 5663), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6463, 6483), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(36)', '(36)'], {}), '(36, 36)\n', (6475, 6483), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6533, 6553), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(36)', '(36)'], {}), '(36, 36)\n', (6545, 6553), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6868, 6888), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(36)', '(36)'], {}), '(36, 36)\n', (6880, 6888), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6942, 6962), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(36)', '(36)'], {}), '(36, 36)\n', (6954, 6962), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.pipeline import make_pipeline
import matplotlib.pyplot as plt
import numpy as np
import random
#===============================================================================================#
# Number of cases per day of covid 19 in the US for 218 days
cases = [
    1,0,1,0,3,0,0,0,0,2,1,0,3,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,8,6,23,25,
    20,66,47,64,147,225,290,278,414,267,338,1237,755,2797,3419,4777,3528,5836,8821,10934,
    10115,13987,16916,17965,19332,18251,22635,22562,27043,26135,34864,30683,26065,43438,
    21597,31534,31705,33251,33288,29145,24156,26385,27158,29164,29002,29916,25995,29468,
    26490,25858,37144,29873,33161,29256,23371,23901,25512,31787,30369,29794,29763,19138,
    22303,23366,30861,25996,26660,23792,18106,21467,20869,27191,22977,31967,13284,24481,
    23405,22860,20522,24268,26229,15342,24958,16429,19680,21304,18123,23553,26177,14790,
    24955,14676,20555,29034,29214,17919,17598,17376,20486,21744,22317,25468,21957,18577,
    28392,22834,27828,32218,32411,27616,26657,34313,37667,40588,44602,44703,41390,35664,
    43644,54357,52730,57718,52228,44361,46329,50304,64771,59260,66281,62918,60469,58858,
    60971,67404,72045,74710,67574,63201,57777,63028,70106,72219,74818,64582,61795,54448,
    59862,65935,68042,68605,58947,47576,49716,49988,53685,55836,62042,54590,48690,40522,
    55540,56307,52799,56729,54686,41893,38986,39318,46500,44864,46754,45265,38679,33076,
    37086,46393
]
# x axis is simply the day index 0..217.
days = list(range(len(cases)))
print(len(days))
# Reshape both series to (n_samples, 1) column vectors as sklearn expects.
days = np.asarray(days)
cases = np.asarray(cases)
days = days[:, np.newaxis]
cases = cases[:, np.newaxis]
# Raw scatter of the daily case counts (blocks until the window is closed).
plt.scatter(days, cases)
plt.show()
# Dense x grid used only for drawing a smooth fitted curve.
xseq = np.linspace(days.min(), days.max(), 300).reshape(-1,1)
# Degree-12 polynomial regression pipeline.
# NOTE(review): such a high degree tends to oscillate near the data
# boundaries (Runge phenomenon) - verify the fit quality before reuse.
regr = make_pipeline(PolynomialFeatures(12), LinearRegression())
regr.fit(days, cases)
# Overlay the fitted curve (red) on the raw data.
plt.scatter(days, cases)
plt.plot(xseq, regr.predict(xseq), color = "red")
plt.show()
#===============================================================================================#
# Ref
# https://espanol.cdc.gov/coronavirus/2019-ncov/cases-updates/previouscases.html | [
"sklearn.preprocessing.PolynomialFeatures",
"numpy.asarray",
"matplotlib.pyplot.scatter",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show"
] | [((1680, 1696), 'numpy.asarray', 'np.asarray', (['days'], {}), '(days)\n', (1690, 1696), True, 'import numpy as np\n'), ((1705, 1722), 'numpy.asarray', 'np.asarray', (['cases'], {}), '(cases)\n', (1715, 1722), True, 'import numpy as np\n'), ((1782, 1806), 'matplotlib.pyplot.scatter', 'plt.scatter', (['days', 'cases'], {}), '(days, cases)\n', (1793, 1806), True, 'import matplotlib.pyplot as plt\n'), ((1807, 1817), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1815, 1817), True, 'import matplotlib.pyplot as plt\n'), ((1970, 1994), 'matplotlib.pyplot.scatter', 'plt.scatter', (['days', 'cases'], {}), '(days, cases)\n', (1981, 1994), True, 'import matplotlib.pyplot as plt\n'), ((2045, 2055), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2053, 2055), True, 'import matplotlib.pyplot as plt\n'), ((1903, 1925), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(12)'], {}), '(12)\n', (1921, 1925), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1927, 1945), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1943, 1945), False, 'from sklearn.linear_model import LinearRegression\n')] |
import pandas as pd
from fastapi import APIRouter
# Define Router
router = APIRouter()
# Public CSV of Apple finance data used as the demo dataset.
data_url = "https://raw.githubusercontent.com/plotly/datasets/master/finance-charts-apple.csv"
# Module-level cache for the loaded DataFrame; filled lazily by get_data().
data = None
@router.get("/get_data")
async def get_data():
    """Serve the dataset as a list of row dicts, downloading the CSV on first call."""
    global data
    if data is None:
        # First request pays the download cost; every later request hits the cache.
        data = pd.read_csv(data_url)
    records = data.to_dict('records')
    return records
| [
"fastapi.APIRouter",
"pandas.read_csv"
] | [((77, 88), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (86, 88), False, 'from fastapi import APIRouter\n'), ((299, 320), 'pandas.read_csv', 'pd.read_csv', (['data_url'], {}), '(data_url)\n', (310, 320), True, 'import pandas as pd\n')] |
# Generated by Django 4.0.1 on 2022-01-16 16:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Generated schema migration: adds a `banned` flag to the users.User model.
    dependencies = [
        ('users', '0004_user_rating'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='banned',
            # default=False means all existing rows start out unbanned.
            field=models.BooleanField(default=False),
        ),
    ]
| [
"django.db.models.BooleanField"
] | [((323, 357), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (342, 357), False, 'from django.db import migrations, models\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test cases for Poisson distribution"""
import numpy as np
from scipy import stats
import mindspore.context as context
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor
from mindspore import dtype
# Run every network below in graph (compiled) mode on an Ascend device.
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Prob(nn.Cell):
    """Cell wrapper evaluating the probability mass of Poisson(rate=0.5)."""
    def __init__(self):
        super().__init__()
        self.poisson = msd.Poisson([0.5], dtype=dtype.float32)
    def construct(self, x_):
        return self.poisson.prob(x_)
def test_pdf():
    """Poisson(0.5) pmf must agree with the scipy benchmark at x in {-1, 0, 1}."""
    points = np.array([-1.0, 0.0, 1.0]).astype(np.float32)
    expected = stats.poisson(mu=0.5).pmf([-1.0, 0.0, 1.0]).astype(np.float32)
    computed = Prob()(Tensor(points, dtype=dtype.float32))
    assert np.all(np.abs(computed.asnumpy() - expected) < 1e-6)
class LogProb(nn.Cell):
    """Cell wrapper evaluating the log probability mass of Poisson(rate=0.5)."""
    def __init__(self):
        super().__init__()
        self.poisson = msd.Poisson([0.5], dtype=dtype.float32)
    def construct(self, x_):
        return self.poisson.log_prob(x_)
def test_log_likelihood():
    """Poisson(0.5) log-pmf must agree with the scipy benchmark at x in {1, 2}."""
    expected = stats.poisson(mu=0.5).logpmf([1.0, 2.0]).astype(np.float32)
    points = Tensor(np.array([1.0, 2.0]).astype(np.float32), dtype=dtype.float32)
    computed = LogProb()(points)
    assert np.all(np.abs(computed.asnumpy() - expected) < 1e-6)
class Basics(nn.Cell):
    """Cell wrapper returning mean, standard deviation and mode of Poisson(1.44)."""
    def __init__(self):
        super().__init__()
        self.poisson = msd.Poisson([1.44], dtype=dtype.float32)
    def construct(self):
        p = self.poisson
        return p.mean(), p.sd(), p.mode()
def test_basics():
    """Mean, sd and mode of Poisson(1.44) must match their closed-form values."""
    mean, sd, mode = Basics()()
    tol = 1e-6
    for value, expected in ((mean, 1.44), (sd, 1.2), (mode, 1)):
        assert np.all(np.abs(value.asnumpy() - expected) < tol)
class Sampling(nn.Cell):
    """Cell wrapper drawing fixed-shape samples from two stacked Poisson rates."""
    def __init__(self, shape, seed=0):
        super().__init__()
        self.shape = shape
        self.poisson = msd.Poisson([[1.0], [0.5]], seed=seed, dtype=dtype.float32)
    def construct(self, rate=None):
        return self.poisson.sample(self.shape, rate)
def test_sample():
    """Drawing (2, 3)-shaped samples at three explicit rates yields shape (2, 3, 3)."""
    rate = Tensor([1.0, 2.0, 3.0], dtype=dtype.float32)
    drawn = Sampling((2, 3), seed=10)(rate)
    assert drawn.shape == (2, 3, 3)
class CDF(nn.Cell):
    """Cell wrapper evaluating the cumulative distribution of Poisson(rate=0.5)."""
    def __init__(self):
        super().__init__()
        self.poisson = msd.Poisson([0.5], dtype=dtype.float32)
    def construct(self, x_):
        return self.poisson.cdf(x_)
def test_cdf():
    """Poisson(0.5) cdf must agree with the scipy benchmark at x in {-1, 0, 1}."""
    points = np.array([-1.0, 0.0, 1.0]).astype(np.float32)
    expected = stats.poisson(mu=0.5).cdf([-1.0, 0.0, 1.0]).astype(np.float32)
    computed = CDF()(Tensor(points, dtype=dtype.float32))
    assert np.all(np.abs(computed.asnumpy() - expected) < 1e-6)
class LogCDF(nn.Cell):
    """Cell wrapper evaluating the log cumulative distribution of Poisson(0.5)."""
    def __init__(self):
        super().__init__()
        self.poisson = msd.Poisson([0.5], dtype=dtype.float32)
    def construct(self, x_):
        return self.poisson.log_cdf(x_)
def test_log_cdf():
    """Poisson(0.5) log-cdf must agree with the scipy benchmark at {0.5, 1.0, 2.5}."""
    points = np.array([0.5, 1.0, 2.5]).astype(np.float32)
    expected = stats.poisson(mu=0.5).logcdf([0.5, 1.0, 2.5]).astype(np.float32)
    computed = LogCDF()(Tensor(points, dtype=dtype.float32))
    assert np.all(np.abs(computed.asnumpy() - expected) < 1e-6)
class SF(nn.Cell):
    """Cell wrapper evaluating the survival function of Poisson(rate=0.5)."""
    def __init__(self):
        super().__init__()
        self.poisson = msd.Poisson([0.5], dtype=dtype.float32)
    def construct(self, x_):
        return self.poisson.survival_function(x_)
def test_survival():
    """Poisson(0.5) survival function must match scipy's sf at x in {-1, 0, 1}."""
    points = np.array([-1.0, 0.0, 1.0]).astype(np.float32)
    expected = stats.poisson(mu=0.5).sf([-1.0, 0.0, 1.0]).astype(np.float32)
    computed = SF()(Tensor(points, dtype=dtype.float32))
    assert np.all(np.abs(computed.asnumpy() - expected) < 1e-6)
class LogSF(nn.Cell):
    """Cell wrapper evaluating the log survival function of Poisson(rate=0.5)."""
    def __init__(self):
        super().__init__()
        self.poisson = msd.Poisson([0.5], dtype=dtype.float32)
    def construct(self, x_):
        return self.poisson.log_survival(x_)
def test_log_survival():
    """Poisson(0.5) log-survival must match scipy's logsf at x in {-1, 0, 1}."""
    points = np.array([-1.0, 0.0, 1.0]).astype(np.float32)
    expected = stats.poisson(mu=0.5).logsf([-1.0, 0.0, 1.0]).astype(np.float32)
    computed = LogSF()(Tensor(points, dtype=dtype.float32))
    assert np.all(np.abs(computed.asnumpy() - expected) < 1e-6)
| [
"mindspore.context.set_context",
"numpy.array",
"scipy.stats.poisson",
"mindspore.nn.probability.distribution.Poisson",
"mindspore.Tensor"
] | [((924, 992), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""Ascend"""'}), "(mode=context.GRAPH_MODE, device_target='Ascend')\n", (943, 992), True, 'import mindspore.context as context\n'), ((1334, 1355), 'scipy.stats.poisson', 'stats.poisson', ([], {'mu': '(0.5)'}), '(mu=0.5)\n', (1347, 1355), False, 'from scipy import stats\n'), ((2002, 2023), 'scipy.stats.poisson', 'stats.poisson', ([], {'mu': '(0.5)'}), '(mu=0.5)\n', (2015, 2023), False, 'from scipy import stats\n'), ((3448, 3492), 'mindspore.Tensor', 'Tensor', (['[1.0, 2.0, 3.0]'], {'dtype': 'dtype.float32'}), '([1.0, 2.0, 3.0], dtype=dtype.float32)\n', (3454, 3492), False, 'from mindspore import Tensor\n'), ((3926, 3947), 'scipy.stats.poisson', 'stats.poisson', ([], {'mu': '(0.5)'}), '(mu=0.5)\n', (3939, 3947), False, 'from scipy import stats\n'), ((4575, 4596), 'scipy.stats.poisson', 'stats.poisson', ([], {'mu': '(0.5)'}), '(mu=0.5)\n', (4588, 4596), False, 'from scipy import stats\n'), ((5263, 5284), 'scipy.stats.poisson', 'stats.poisson', ([], {'mu': '(0.5)'}), '(mu=0.5)\n', (5276, 5284), False, 'from scipy import stats\n'), ((5966, 5987), 'scipy.stats.poisson', 'stats.poisson', ([], {'mu': '(0.5)'}), '(mu=0.5)\n', (5979, 5987), False, 'from scipy import stats\n'), ((1162, 1201), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[0.5]'], {'dtype': 'dtype.float32'}), '([0.5], dtype=dtype.float32)\n', (1173, 1201), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1811, 1850), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[0.5]'], {'dtype': 'dtype.float32'}), '([0.5], dtype=dtype.float32)\n', (1822, 1850), True, 'import mindspore.nn.probability.distribution as msd\n'), ((2482, 2522), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[1.44]'], {'dtype': 'dtype.float32'}), '([1.44], dtype=dtype.float32)\n', (2493, 2522), True, 'import mindspore.nn.probability.distribution 
as msd\n'), ((3180, 3239), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[[1.0], [0.5]]'], {'seed': 'seed', 'dtype': 'dtype.float32'}), '([[1.0], [0.5]], seed=seed, dtype=dtype.float32)\n', (3191, 3239), True, 'import mindspore.nn.probability.distribution as msd\n'), ((3755, 3794), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[0.5]'], {'dtype': 'dtype.float32'}), '([0.5], dtype=dtype.float32)\n', (3766, 3794), True, 'import mindspore.nn.probability.distribution as msd\n'), ((4392, 4431), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[0.5]'], {'dtype': 'dtype.float32'}), '([0.5], dtype=dtype.float32)\n', (4403, 4431), True, 'import mindspore.nn.probability.distribution as msd\n'), ((5059, 5098), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[0.5]'], {'dtype': 'dtype.float32'}), '([0.5], dtype=dtype.float32)\n', (5070, 5098), True, 'import mindspore.nn.probability.distribution as msd\n'), ((5759, 5798), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[0.5]'], {'dtype': 'dtype.float32'}), '([0.5], dtype=dtype.float32)\n', (5770, 5798), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1465, 1491), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0]'], {}), '([-1.0, 0.0, 1.0])\n', (1473, 1491), True, 'import numpy as np\n'), ((2140, 2160), 'numpy.array', 'np.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (2148, 2160), True, 'import numpy as np\n'), ((4056, 4082), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0]'], {}), '([-1.0, 0.0, 1.0])\n', (4064, 4082), True, 'import numpy as np\n'), ((4716, 4741), 'numpy.array', 'np.array', (['[0.5, 1.0, 2.5]'], {}), '([0.5, 1.0, 2.5])\n', (4724, 4741), True, 'import numpy as np\n'), ((5401, 5427), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0]'], {}), '([-1.0, 0.0, 1.0])\n', (5409, 5427), True, 'import numpy as np\n'), ((6116, 6142), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0]'], {}), '([-1.0, 0.0, 1.0])\n', (6124, 
6142), True, 'import numpy as np\n')] |
#Dependencies
from googlesearch import search
import sys
#Variables
args = sys.argv
#Main
# CLI: python index.py <keyword> <amount> <output>
if len(args) == 1:
    print("python index.py <keyword> <amount> <output>")
    sys.exit()
if len(args) == 2:
    print("Invalid amount.")
    sys.exit()
if len(args) == 3:
    print("Invalid output.")
    sys.exit()
if not args[2].isnumeric():
    print("amount is not a number.")
    sys.exit()
# search() may yield results lazily depending on the installed googlesearch
# package; materialize into a list so len() and repeated iteration both work.
# NOTE(review): the original requested num_results=<amount> - 1; kept as-is.
results = list(search(f"site:anonfiles.com {args[1]}", num_results=int(args[2]) - 1))
if not results:
    print("No links found.")
    sys.exit()
for link in results:
    print(link)
print(f"{len(results)} links found.")
print("Saving the results, please wait.")
# Context manager guarantees the output file is flushed and closed even on error
# (the original left the handle open only implicitly via interpreter exit).
with open(args[3], "w") as file:
    file.write("\n".join(results))
print(f"Results has been saved to {args[3]}")
| [
"sys.exit"
] | [((172, 182), 'sys.exit', 'sys.exit', ([], {}), '()\n', (180, 182), False, 'import sys\n'), ((240, 250), 'sys.exit', 'sys.exit', ([], {}), '()\n', (248, 250), False, 'import sys\n'), ((308, 318), 'sys.exit', 'sys.exit', ([], {}), '()\n', (316, 318), False, 'import sys\n'), ((394, 404), 'sys.exit', 'sys.exit', ([], {}), '()\n', (402, 404), False, 'import sys\n'), ((546, 556), 'sys.exit', 'sys.exit', ([], {}), '()\n', (554, 556), False, 'import sys\n')] |
from RPi import GPIO
from time import sleep
# clk = 17
# dt = 18
# BCM pin numbers: sw = push button, clk/dt = the encoder's quadrature lines
# (presumably a KY-040-style rotary encoder - confirm against the wiring).
sw = 24
clk = 12
dt = 25
GPIO.setmode(GPIO.BCM)
GPIO.setup(clk, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(dt, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(sw, GPIO.IN, pull_up_down=GPIO.PUD_UP)
counter = 0
clkLastState = GPIO.input(clk)
try:
    # Poll at ~100 Hz; direction is derived from which line changed first.
    while True:
        pushBtn = GPIO.input(sw)
        # Button is pulled up, so 0 means pressed.
        if pushBtn !=1:
            print("button pressed..")
        clkState = GPIO.input(clk)
        dtState = GPIO.input(dt)
        if clkState != clkLastState:
            # dt differing from clk on a clk edge means one rotation direction;
            # matching means the other.
            if dtState != clkState:
                counter += 1
            else:
                counter -= 1
            print(counter)
        clkLastState = clkState
        sleep(0.01)
finally:
GPIO.cleanup() | [
"RPi.GPIO.cleanup",
"RPi.GPIO.setup",
"time.sleep",
"RPi.GPIO.input",
"RPi.GPIO.setmode"
] | [((93, 115), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (105, 115), False, 'from RPi import GPIO\n'), ((116, 168), 'RPi.GPIO.setup', 'GPIO.setup', (['clk', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_DOWN'}), '(clk, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n', (126, 168), False, 'from RPi import GPIO\n'), ((169, 220), 'RPi.GPIO.setup', 'GPIO.setup', (['dt', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_DOWN'}), '(dt, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n', (179, 220), False, 'from RPi import GPIO\n'), ((221, 270), 'RPi.GPIO.setup', 'GPIO.setup', (['sw', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(sw, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (231, 270), False, 'from RPi import GPIO\n'), ((299, 314), 'RPi.GPIO.input', 'GPIO.input', (['clk'], {}), '(clk)\n', (309, 314), False, 'from RPi import GPIO\n'), ((911, 925), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (923, 925), False, 'from RPi import GPIO\n'), ((367, 381), 'RPi.GPIO.input', 'GPIO.input', (['sw'], {}), '(sw)\n', (377, 381), False, 'from RPi import GPIO\n'), ((516, 531), 'RPi.GPIO.input', 'GPIO.input', (['clk'], {}), '(clk)\n', (526, 531), False, 'from RPi import GPIO\n'), ((558, 572), 'RPi.GPIO.input', 'GPIO.input', (['dt'], {}), '(dt)\n', (568, 572), False, 'from RPi import GPIO\n'), ((882, 893), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (887, 893), False, 'from time import sleep\n')] |
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
import threading
from dragon.vm.tensorflow.framework import dtypes, ops
from dragon.vm.tensorflow.ops.variables import Variable
from dragon.vm.tensorflow.framework.ops import _DefaultStack
from dragon.vm.tensorflow.ops import init_ops
class VariableScope(object):
    """A named scope that creates, stores and (optionally) reuses variables.

    Mirrors TensorFlow's ``VariableScope``: entering the scope also opens the
    matching dragon name scope, and ``get_variable`` either creates a new
    :class:`Variable` under the scope or returns a stored one when ``reuse``
    is enabled.
    """

    def __init__(self, reuse, name='', name_scope='', **kwargs):
        # Whether get_variable() may return an already-created variable.
        self._reuse = reuse
        # Variable name scope accumulated up to the current level.
        self._name = name
        # Tensor name scope accumulated up to the current level.
        self._name_scope = name_scope if name_scope else ''
        # Variables created under this scope, keyed by their full name.
        self._vars = {}
        # The scope that was active before this one was opened.
        self._old_scope = None
        # Context manager driving the underlying dragon name scope.
        self._name_scope_ctx = kwargs.get('name_scope_ctx', None)

    @property
    def reuse(self):
        """Whether this variable scope can reuse the variables.

        Returns
        -------
        boolean
            ``True`` if variables can be reused.

        """
        return self._reuse

    @property
    def name(self):
        """Return the tensor name scope till the current level.

        Returns
        -------
        str
            The tensor name scope.

        """
        return self._name

    @property
    def original_name_scope(self):
        """Return the variable name scope till the current level.

        Returns
        -------
        str
            The variable name scope.

        """
        return self._name_scope

    @property
    def vars(self):
        """Return the variable dict of this scope.

        Returns
        -------
        dict of Tensor
            The variable dict.

        """
        return self._vars

    def get_variable(self,
                     name,
                     shape=None,
                     dtype=None,
                     initializer=None,
                     regularizer=None,
                     trainable=True,
                     collections=None,
                     validate_shape=True):
        """Create a variable under this scope, or return the stored one.

        Raises
        ------
        ValueError
            If a new variable is requested without a shape, or if the name
            already exists and ``reuse`` is disabled.

        """
        excepted_name = self.name + name
        if excepted_name not in self._vars:
            # Create a new variable.
            if shape is None:
                raise ValueError(
                    'Must specific a shape to create a Variable.')
            if initializer is None:
                initializer = self._get_default_initializer(
                    name, shape=shape, dtype=dtype)
            variable = Variable(
                initial_value=initializer(shape, dtype=dtype),
                regularizer=regularizer,
                trainable=trainable,
                collections=collections,
                validate_shape=validate_shape,
                name_from_variable_scope=excepted_name,
                dtype=dtype)
            self._vars[excepted_name] = variable
            return variable
        else:
            # Return an existing variable.
            if self._reuse:
                return self._vars[excepted_name]
            raise ValueError('Variable {} already exists, disallowed. '
                             'Did you mean to set reuse=True in VarScope?'.format(excepted_name))

    def __enter__(self):
        # Variable scope will also affect the global name scope.
        self._name_scope_ctx.__enter__()
        get_variable_scope_store().open(self)
        return self

    def __exit__(self, type, value, traceback):
        get_variable_scope_store().close()
        self._name_scope_ctx.__exit__(type, value, traceback)

    def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
        """Pick an initializer from the dtype: Xavier for floats, zeros for ints."""
        # Defaults: float32
        if dtype is None:
            dtype = dtypes.float32
        # Xavier for float16, float32, float64
        if dtype.is_floating:
            initializer = init_ops.glorot_uniform_initializer()
        # Zeros for integers
        elif dtype.is_integer or \
                dtype.is_unsigned or \
                dtype.is_bool:
            initializer = init_ops.zeros_initializer()(
                shape=shape, dtype=dtype.base_dtype)
        # Fail to match the DType
        else:
            # Fixed broken format string: the original mixed '{}' and '%s'
            # and passed two arguments to a single-placeholder template,
            # silently dropping the dtype from the message.
            raise ValueError(
                'An initializer for Variable({}) of {} is required.'
                .format(name, dtype.base_dtype))
        return initializer
def variable_scope(name_or_scope, reuse=None, **kwargs):
    """Build and return a ``VariableScope`` named ``name_or_scope``.

    The dragon name-scope context manager is stored on the returned scope
    (instead of being entered here) because this function must hand back a
    ``VariableScope`` object, not a context manager.
    """
    scope_name = name_or_scope if name_or_scope else ''
    suffix = scope_name + '/' if scope_name != '' else ''
    store = get_variable_scope_store()
    scope = VariableScope(
        reuse,
        name=store.current_scope.name + suffix,
        name_scope=dragon.get_default_name_scope() + suffix)
    scope._name_scope_ctx = dragon.name_scope(scope_name)
    return scope
def get_variable(name,
                 shape=None,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=True,
                 collections=None,
                 validate_shape=True,
                 **kwargs):
    """Create or fetch a variable through the currently active scope."""
    active_scope = get_variable_scope()
    return active_scope.get_variable(
        name,
        shape=shape,
        dtype=dtype,
        initializer=initializer,
        regularizer=regularizer,
        trainable=trainable,
        collections=collections,
        validate_shape=validate_shape)
# Collection key under which the thread-local scope store is registered.
_GLOBAL_VARIABLE_SCOPE_STORE_KEY = ("__varscope",)
# NOTE(review): appears unused in this module; presumably kept for API
# parity with TensorFlow -- confirm before removing.
_GLOBAL_VARIABLE_SCOPE_STACK = _DefaultStack()
class _VariableScopeStore(threading.local):
    """Thread-local holder for the active variable scope."""

    def __init__(self):
        super(_VariableScopeStore, self).__init__()
        self.name_scope = None
        self.previous_scope = None
        # Each thread starts with an anonymous, non-reusing root scope.
        self.current_scope = VariableScope(False)

    def open(self, var_scope):
        """Make ``var_scope`` current, remembering the one it replaces."""
        self.previous_scope, self.current_scope = self.current_scope, var_scope

    def close(self):
        """Restore the scope that was current before the last ``open``."""
        self.current_scope = self.previous_scope
def get_variable_scope_store():
    """Fetch the thread-local scope store, registering it on first use."""
    stored = ops.get_collection(_GLOBAL_VARIABLE_SCOPE_STORE_KEY)
    if stored:
        return stored[0]
    store = _VariableScopeStore()
    ops.add_to_collection(_GLOBAL_VARIABLE_SCOPE_STORE_KEY, store)
    return store
def get_variable_scope():
    """Return the variable scope that is currently active."""
    store = get_variable_scope_store()
    return store.current_scope
| [
"dragon.vm.tensorflow.framework.ops.add_to_collection",
"dragon.vm.tensorflow.framework.ops._DefaultStack",
"dragon.vm.tensorflow.ops.init_ops.zeros_initializer",
"dragon.vm.tensorflow.framework.ops.get_collection",
"dragon.get_default_name_scope",
"dragon.name_scope",
"dragon.vm.tensorflow.ops.init_ops... | [((6029, 6044), 'dragon.vm.tensorflow.framework.ops._DefaultStack', '_DefaultStack', ([], {}), '()\n', (6042, 6044), False, 'from dragon.vm.tensorflow.framework.ops import _DefaultStack\n'), ((5350, 5382), 'dragon.name_scope', 'dragon.name_scope', (['name_or_scope'], {}), '(name_or_scope)\n', (5367, 5382), False, 'import dragon\n'), ((6590, 6642), 'dragon.vm.tensorflow.framework.ops.get_collection', 'ops.get_collection', (['_GLOBAL_VARIABLE_SCOPE_STORE_KEY'], {}), '(_GLOBAL_VARIABLE_SCOPE_STORE_KEY)\n', (6608, 6642), False, 'from dragon.vm.tensorflow.framework import dtypes, ops\n'), ((5117, 5148), 'dragon.get_default_name_scope', 'dragon.get_default_name_scope', ([], {}), '()\n', (5146, 5148), False, 'import dragon\n'), ((6719, 6787), 'dragon.vm.tensorflow.framework.ops.add_to_collection', 'ops.add_to_collection', (['_GLOBAL_VARIABLE_SCOPE_STORE_KEY', 'scope_store'], {}), '(_GLOBAL_VARIABLE_SCOPE_STORE_KEY, scope_store)\n', (6740, 6787), False, 'from dragon.vm.tensorflow.framework import dtypes, ops\n'), ((4301, 4338), 'dragon.vm.tensorflow.ops.init_ops.glorot_uniform_initializer', 'init_ops.glorot_uniform_initializer', ([], {}), '()\n', (4336, 4338), False, 'from dragon.vm.tensorflow.ops import init_ops\n'), ((4504, 4532), 'dragon.vm.tensorflow.ops.init_ops.zeros_initializer', 'init_ops.zeros_initializer', ([], {}), '()\n', (4530, 4532), False, 'from dragon.vm.tensorflow.ops import init_ops\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 7/20/20 8:07 PM
# @Author : anonymous
# @File : Mutation_equal.py
#TODO:integrate transformation rules
import re
import random
import mutation.gateCirq_EqualT as MC
import mutation.gateQiskit_EqualT as MQ
import mutation.gatePyQuil_EqualT as MP
def figure_out_tab (line:str):
    """Return the leading run of spaces of ``line`` (its indentation).

    Fixed: the original indexed ``line[i]`` without a bounds check and
    raised ``IndexError`` on an empty string or a line made only of spaces.

    Args:
        line: Any text line.

    Returns:
        The indentation prefix, e.g. ``"    "`` (empty string if none).
    """
    count = 0
    while count < len(line) and line[count] == " ":
        count += 1
    return line[:count]
def generate_trival(address_in:str, address_out:str, total_number:int,platform:str,qubit_number:int, mutation_number:int):
    """Copy a benchmark program, inserting two self-cancelling gates.

    Reads ``address_in`` line by line and writes ``address_out`` with three
    rewrites on the way:

    * the embedded ``../data/...csv`` output path is renamed to match
      ``address_out``;
    * just before the ``# circuit end`` marker, a pair of identical gates
      chosen by ``mutation_number`` (X, Y, SWAP or CNOT) is inserted on
      ``qubit_number`` (two-qubit gates fall back to qubit 1 when
      ``qubit_number`` is 0);
    * the ``# total number=`` header is bumped by the 2 inserted gates.

    Fixed: files are now opened with context managers, so both handles are
    closed even if a gate helper raises (the original closed them only on
    the success path).  The platform dispatch, previously copy-pasted three
    times, is collapsed into a single module lookup.

    Args:
        address_in: Path of the source benchmark file.
        address_out: Path of the mutant file to create.
        total_number: Gate count currently declared in the source file.
        platform: ``"Qiskit"``, ``"Cirq"``; anything else selects PyQuil.
        qubit_number: Qubit index the extra gates act on.
        mutation_number: Index into ``["X", "Y", "SWAP", "CNOT"]``.
    """
    end_find = re.compile("# circuit end")
    writefile_address = "../data/" + address_out[13:-3] + ".csv"
    writefile_find = re.compile("../data/" + address_in[13:-3] + ".csv")
    total_operation_find = re.compile("# total number=")
    equal_change = ["X", "Y", "SWAP", "CNOT"]
    pattern = equal_change[mutation_number]
    # Dispatch on the platform once; non-Qiskit/Cirq falls back to PyQuil.
    mod = {"Qiskit": MQ, "Cirq": MC}.get(platform, MP)
    tab = ""
    print("write at:", address_out)
    with open(address_in) as readfile, open(address_out, "w") as writefile:
        line = readfile.readline()
        while line:
            # Track the indentation of the last indented line so the
            # inserted gates line up with the circuit body.
            if figure_out_tab(line) != "":
                tab = figure_out_tab(line)
            if writefile_find.search(line):
                # Point the program's CSV output at the mutant's data file.
                writefile.write(re.sub(writefile_find, writefile_address, line))
                line = readfile.readline()
                continue
            if end_find.search(line):
                if pattern in ("CNOT", "SWAP"):
                    # Two-qubit gates need a valid partner: use qubit 1
                    # when the mutated qubit is 0.
                    target = 1 if qubit_number == 0 else qubit_number
                    two_gate = mod.two_CNOT if pattern == "CNOT" else mod.two_SWAP
                    write_line = two_gate(tab, target, total_number)
                elif pattern == "Y":
                    write_line = mod.two_Y(tab, qubit_number, total_number)
                else:  # pattern == "X"
                    write_line = mod.two_X(tab, qubit_number, total_number)
                # Insert the gates just before the "# circuit end" marker.
                writefile.write(write_line + line)
                line = readfile.readline()
                continue
            if total_operation_find.search(line):
                # Two gates were inserted, so bump the declared gate count.
                writefile.write("# total number=" + str(total_number + 2) + "\n")
            else:
                writefile.write(line)
            line = readfile.readline()
def generate_same(operation_number:int, address_in:str, address_out:str, total_number:int, pattern:str, platform:str):
    """Copy a benchmark program, expanding one gate into an equivalent sequence.

    Reads ``address_in`` line by line and writes ``address_out``.  The gate
    line tagged ``# number=<operation_number>`` is rewritten by the matching
    platform helper (``cnot_to_hczh`` / ``z_to_cnotzcnot`` /
    ``x_to_cnotxcnot``), the embedded CSV output path is renamed to match
    ``address_out``, and the ``# total number=`` header is bumped by 3.

    Fixed: files are now opened with context managers, so both handles are
    closed even if a helper raises (the original closed them only on the
    success path).  The platform dispatch, previously copy-pasted three
    times, is collapsed into a single module lookup.

    Args:
        operation_number: Tag of the gate line to rewrite.
        address_in: Path of the source benchmark file.
        address_out: Path of the mutant file to create.
        total_number: Gate count currently declared in the source file.
        pattern: ``"CNOT"``, ``"Z"`` or ``"X"``.
        platform: ``"Qiskit"``, ``"Cirq"``; anything else selects PyQuil.
    """
    operation_find = re.compile("# number=" + str(operation_number) + "\n")
    writefile_address = "../data/" + address_out[13:-3] + ".csv"
    writefile_find = re.compile("../data/" + address_in[13:-3] + ".csv")
    total_operation_find = re.compile("# total number=")
    # Dispatch on the platform once; non-Qiskit/Cirq falls back to PyQuil.
    mod = {"Qiskit": MQ, "Cirq": MC}.get(platform, MP)
    transforms = {"CNOT": mod.cnot_to_hczh,
                  "Z": mod.z_to_cnotzcnot,
                  "X": mod.x_to_cnotxcnot}
    print("write at:", address_out)
    with open(address_in) as readfile, open(address_out, "w") as writefile:
        line = readfile.readline()
        while line:
            if writefile_find.search(line):
                # Point the program's CSV output at the mutant's data file.
                writefile.write(re.sub(writefile_find, writefile_address, line))
                line = readfile.readline()
                continue
            if operation_find.search(line):
                transform = transforms.get(pattern)
                # An unrecognized pattern copies the line through unchanged,
                # matching the original tolerant behaviour.
                writefile.write(transform(line, total_number) if transform else line)
                line = readfile.readline()
                continue
            if total_operation_find.search(line):
                # The declared gate count grows by 3 after the expansion.
                writefile.write("# total number=" + str(total_number + 3) + "\n")
            else:
                writefile.write(line)
            line = readfile.readline()
def mutate(seed:int, write:int):
    """Create one mutant of benchmark ``seed`` and save it under index ``write``.

    The Cirq version of the benchmark is scanned line by line.  With
    probability 1/3 on a matching gate line (``cirq.CNOT``/``cirq.Z``/
    ``cirq.X`` tagged with ``# number=``), the gate is expanded into an
    equivalent sequence via ``generate_same``; otherwise, on reaching the
    ``# circuit end`` marker, two self-cancelling gates are appended via
    ``generate_trival``.  The same mutation is applied to the Cirq, PyQuil
    and Qiskit variants of the benchmark.
    """
    # Header markers written by the benchmark generator.
    qubit_number_patter = re.compile("# qubit number=")
    total_operation_id = re.compile("# total number=")
    operation_id = re.compile("# number=")
    # Gate patterns eligible for the "equivalent expansion" mutation.
    # NOTE(review): only the Cirq file is scanned; the expansion is then
    # applied at the same tag in all three variants -- presumably the
    # generator keeps the tags aligned across platforms. Confirm.
    patterns = {}
    patterns["CNOT"] = re.compile("cirq.CNOT")
    patterns["Z"] = re.compile("cirq.Z")
    patterns["X"] = re.compile("cirq.X")
    circuit_patter_end = re.compile("# circuit end")
    total_number=0
    flag=0 # tag ("# number=N") of the most recently seen gate line
    qubit_number=0 # qubit count parsed from the "# qubit number=" header
    cirq_address_in = "../benchmark/startCirq"+str(seed)+".py"
    pyquil_address_in = "../benchmark/startPyquil"+ str(seed) + ".py"
    qiskit_address_in = "../benchmark/startQiskit"+ str(seed) + ".py"
    readfile = open(cirq_address_in)
    line = readfile.readline()
    cirq_address_out = "../benchmark/startCirq"+str(write)+".py"
    pyquil_address_out = "../benchmark/startPyquil"+ str(write) + ".py"
    qiskit_address_out = "../benchmark/startQiskit"+ str(write) + ".py"
    while line:
        # Parse the running metadata from the header comments.
        if total_operation_id.search(line):
            total_number = int(line[total_operation_id.search(line).span()[1]:len(line)-1])
        if qubit_number_patter.search(line):
            qubit_number = int(line[qubit_number_patter.search(line).span()[1]:])
        if operation_id.search(line):
            flag = int(line[operation_id.search(line).span()[1]:len(line)-1])
        for pattern in patterns:
            # Equivalent-expansion mutation needs at least 2 qubits.
            if (patterns[pattern].search(line) is not None) and (operation_id.search(line) is not None) and (qubit_number>1):
                # randint(0,5)>3 is true for 4 and 5, i.e. probability 1/3.
                if random.randint(0,5)>3 :
                    readfile.close()
                    generate_same(flag, cirq_address_in, cirq_address_out, total_number, pattern, "Cirq")
                    generate_same(flag, pyquil_address_in, pyquil_address_out, total_number, pattern, "Pyquil")
                    generate_same(flag, qiskit_address_in, qiskit_address_out, total_number, pattern, "Qiskit")
                    return
        if circuit_patter_end.search(line) is not None:
            # No expansion fired: fall back to appending two trivial gates.
            mutate_qubit_number = random.randint(0,qubit_number-1) # the qubit that mutate operation on
            mutation_number = random.randint(0,3) # index into [X, Y, SWAP, CNOT]
            if qubit_number==1:
                # Two-qubit gates (SWAP/CNOT) are impossible: restrict to X/Y.
                mutation_number = random.randint(0,1)
            generate_trival(cirq_address_in,cirq_address_out,total_number,"Cirq",mutate_qubit_number,mutation_number)
            generate_trival(pyquil_address_in,pyquil_address_out,total_number,"Pyquil",mutate_qubit_number,mutation_number)
            generate_trival(qiskit_address_in,qiskit_address_out,total_number,"Qiskit",mutate_qubit_number,mutation_number)
            return
        line = readfile.readline()
    readfile.close()
| [
"re.compile",
"mutation.gatePyQuil_EqualT.two_CNOT",
"mutation.gatePyQuil_EqualT.two_X",
"mutation.gateCirq_EqualT.cnot_to_hczh",
"mutation.gatePyQuil_EqualT.cnot_to_hczh",
"mutation.gateCirq_EqualT.two_CNOT",
"mutation.gateQiskit_EqualT.z_to_cnotzcnot",
"mutation.gatePyQuil_EqualT.z_to_cnotzcnot",
... | [((623, 650), 're.compile', 're.compile', (['"""# circuit end"""'], {}), "('# circuit end')\n", (633, 650), False, 'import re\n'), ((733, 784), 're.compile', 're.compile', (["('../data/' + address_in[13:-3] + '.csv')"], {}), "('../data/' + address_in[13:-3] + '.csv')\n", (743, 784), False, 'import re\n'), ((808, 837), 're.compile', 're.compile', (['"""# total number="""'], {}), "('# total number=')\n", (818, 837), False, 'import re\n'), ((4476, 4527), 're.compile', 're.compile', (["('../data/' + address_in[13:-3] + '.csv')"], {}), "('../data/' + address_in[13:-3] + '.csv')\n", (4486, 4527), False, 'import re\n'), ((4551, 4580), 're.compile', 're.compile', (['"""# total number="""'], {}), "('# total number=')\n", (4561, 4580), False, 'import re\n'), ((6505, 6534), 're.compile', 're.compile', (['"""# qubit number="""'], {}), "('# qubit number=')\n", (6515, 6534), False, 'import re\n'), ((6560, 6589), 're.compile', 're.compile', (['"""# total number="""'], {}), "('# total number=')\n", (6570, 6589), False, 'import re\n'), ((6609, 6632), 're.compile', 're.compile', (['"""# number="""'], {}), "('# number=')\n", (6619, 6632), False, 'import re\n'), ((6675, 6698), 're.compile', 're.compile', (['"""cirq.CNOT"""'], {}), "('cirq.CNOT')\n", (6685, 6698), False, 'import re\n'), ((6719, 6739), 're.compile', 're.compile', (['"""cirq.Z"""'], {}), "('cirq.Z')\n", (6729, 6739), False, 'import re\n'), ((6760, 6780), 're.compile', 're.compile', (['"""cirq.X"""'], {}), "('cirq.X')\n", (6770, 6780), False, 'import re\n'), ((6807, 6834), 're.compile', 're.compile', (['"""# circuit end"""'], {}), "('# circuit end')\n", (6817, 6834), False, 'import re\n'), ((1266, 1319), 're.sub', 're.sub', (['writefile_find', 'writefile_address', 'write_line'], {}), '(writefile_find, writefile_address, write_line)\n', (1272, 1319), False, 'import re\n'), ((4825, 4878), 're.sub', 're.sub', (['writefile_find', 'writefile_address', 'write_line'], {}), '(writefile_find, writefile_address, 
write_line)\n', (4831, 4878), False, 'import re\n'), ((8531, 8566), 'random.randint', 'random.randint', (['(0)', '(qubit_number - 1)'], {}), '(0, qubit_number - 1)\n', (8545, 8566), False, 'import random\n'), ((8631, 8651), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (8645, 8651), False, 'import random\n'), ((8763, 8783), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (8777, 8783), False, 'import random\n'), ((2102, 2143), 'mutation.gateQiskit_EqualT.two_Y', 'MQ.two_Y', (['tab', 'qubit_number', 'total_number'], {}), '(tab, qubit_number, total_number)\n', (2110, 2143), True, 'import mutation.gateQiskit_EqualT as MQ\n'), ((2211, 2252), 'mutation.gateQiskit_EqualT.two_X', 'MQ.two_X', (['tab', 'qubit_number', 'total_number'], {}), '(tab, qubit_number, total_number)\n', (2219, 2252), True, 'import mutation.gateQiskit_EqualT as MQ\n'), ((5124, 5159), 'mutation.gateQiskit_EqualT.cnot_to_hczh', 'MQ.cnot_to_hczh', (['line', 'total_number'], {}), '(line, total_number)\n', (5139, 5159), True, 'import mutation.gateQiskit_EqualT as MQ\n'), ((5228, 5265), 'mutation.gateQiskit_EqualT.z_to_cnotzcnot', 'MQ.z_to_cnotzcnot', (['line', 'total_number'], {}), '(line, total_number)\n', (5245, 5265), True, 'import mutation.gateQiskit_EqualT as MQ\n'), ((5334, 5371), 'mutation.gateQiskit_EqualT.x_to_cnotxcnot', 'MQ.x_to_cnotxcnot', (['line', 'total_number'], {}), '(line, total_number)\n', (5351, 5371), True, 'import mutation.gateQiskit_EqualT as MQ\n'), ((8021, 8041), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (8035, 8041), False, 'import random\n'), ((1637, 1670), 'mutation.gateQiskit_EqualT.two_CNOT', 'MQ.two_CNOT', (['tab', '(1)', 'total_number'], {}), '(tab, 1, total_number)\n', (1648, 1670), True, 'import mutation.gateQiskit_EqualT as MQ\n'), ((1734, 1778), 'mutation.gateQiskit_EqualT.two_CNOT', 'MQ.two_CNOT', (['tab', 'qubit_number', 'total_number'], {}), '(tab, qubit_number, total_number)\n', (1745, 1778), 
True, 'import mutation.gateQiskit_EqualT as MQ\n'), ((1893, 1926), 'mutation.gateQiskit_EqualT.two_SWAP', 'MQ.two_SWAP', (['tab', '(1)', 'total_number'], {}), '(tab, 1, total_number)\n', (1904, 1926), True, 'import mutation.gateQiskit_EqualT as MQ\n'), ((1990, 2034), 'mutation.gateQiskit_EqualT.two_SWAP', 'MQ.two_SWAP', (['tab', 'qubit_number', 'total_number'], {}), '(tab, qubit_number, total_number)\n', (2001, 2034), True, 'import mutation.gateQiskit_EqualT as MQ\n'), ((2869, 2910), 'mutation.gateCirq_EqualT.two_Y', 'MC.two_Y', (['tab', 'qubit_number', 'total_number'], {}), '(tab, qubit_number, total_number)\n', (2877, 2910), True, 'import mutation.gateCirq_EqualT as MC\n'), ((2978, 3019), 'mutation.gateCirq_EqualT.two_X', 'MC.two_X', (['tab', 'qubit_number', 'total_number'], {}), '(tab, qubit_number, total_number)\n', (2986, 3019), True, 'import mutation.gateCirq_EqualT as MC\n'), ((3617, 3658), 'mutation.gatePyQuil_EqualT.two_Y', 'MP.two_Y', (['tab', 'qubit_number', 'total_number'], {}), '(tab, qubit_number, total_number)\n', (3625, 3658), True, 'import mutation.gatePyQuil_EqualT as MP\n'), ((3726, 3767), 'mutation.gatePyQuil_EqualT.two_X', 'MP.two_X', (['tab', 'qubit_number', 'total_number'], {}), '(tab, qubit_number, total_number)\n', (3734, 3767), True, 'import mutation.gatePyQuil_EqualT as MP\n'), ((5478, 5513), 'mutation.gateCirq_EqualT.cnot_to_hczh', 'MC.cnot_to_hczh', (['line', 'total_number'], {}), '(line, total_number)\n', (5493, 5513), True, 'import mutation.gateCirq_EqualT as MC\n'), ((5582, 5619), 'mutation.gateCirq_EqualT.z_to_cnotzcnot', 'MC.z_to_cnotzcnot', (['line', 'total_number'], {}), '(line, total_number)\n', (5599, 5619), True, 'import mutation.gateCirq_EqualT as MC\n'), ((5688, 5725), 'mutation.gateCirq_EqualT.x_to_cnotxcnot', 'MC.x_to_cnotxcnot', (['line', 'total_number'], {}), '(line, total_number)\n', (5705, 5725), True, 'import mutation.gateCirq_EqualT as MC\n'), ((5815, 5850), 'mutation.gatePyQuil_EqualT.cnot_to_hczh', 
'MP.cnot_to_hczh', (['line', 'total_number'], {}), '(line, total_number)\n', (5830, 5850), True, 'import mutation.gatePyQuil_EqualT as MP\n'), ((5919, 5956), 'mutation.gatePyQuil_EqualT.z_to_cnotzcnot', 'MP.z_to_cnotzcnot', (['line', 'total_number'], {}), '(line, total_number)\n', (5936, 5956), True, 'import mutation.gatePyQuil_EqualT as MP\n'), ((6025, 6062), 'mutation.gatePyQuil_EqualT.x_to_cnotxcnot', 'MP.x_to_cnotxcnot', (['line', 'total_number'], {}), '(line, total_number)\n', (6042, 6062), True, 'import mutation.gatePyQuil_EqualT as MP\n'), ((2404, 2437), 'mutation.gateCirq_EqualT.two_CNOT', 'MC.two_CNOT', (['tab', '(1)', 'total_number'], {}), '(tab, 1, total_number)\n', (2415, 2437), True, 'import mutation.gateCirq_EqualT as MC\n'), ((2501, 2545), 'mutation.gateCirq_EqualT.two_CNOT', 'MC.two_CNOT', (['tab', 'qubit_number', 'total_number'], {}), '(tab, qubit_number, total_number)\n', (2512, 2545), True, 'import mutation.gateCirq_EqualT as MC\n'), ((2660, 2693), 'mutation.gateCirq_EqualT.two_SWAP', 'MC.two_SWAP', (['tab', '(1)', 'total_number'], {}), '(tab, 1, total_number)\n', (2671, 2693), True, 'import mutation.gateCirq_EqualT as MC\n'), ((2757, 2801), 'mutation.gateCirq_EqualT.two_SWAP', 'MC.two_SWAP', (['tab', 'qubit_number', 'total_number'], {}), '(tab, qubit_number, total_number)\n', (2768, 2801), True, 'import mutation.gateCirq_EqualT as MC\n'), ((3152, 3185), 'mutation.gatePyQuil_EqualT.two_CNOT', 'MP.two_CNOT', (['tab', '(1)', 'total_number'], {}), '(tab, 1, total_number)\n', (3163, 3185), True, 'import mutation.gatePyQuil_EqualT as MP\n'), ((3249, 3293), 'mutation.gatePyQuil_EqualT.two_CNOT', 'MP.two_CNOT', (['tab', 'qubit_number', 'total_number'], {}), '(tab, qubit_number, total_number)\n', (3260, 3293), True, 'import mutation.gatePyQuil_EqualT as MP\n'), ((3408, 3441), 'mutation.gatePyQuil_EqualT.two_SWAP', 'MP.two_SWAP', (['tab', '(1)', 'total_number'], {}), '(tab, 1, total_number)\n', (3419, 3441), True, 'import mutation.gatePyQuil_EqualT as 
MP\n'), ((3505, 3549), 'mutation.gatePyQuil_EqualT.two_SWAP', 'MP.two_SWAP', (['tab', 'qubit_number', 'total_number'], {}), '(tab, qubit_number, total_number)\n', (3516, 3549), True, 'import mutation.gatePyQuil_EqualT as MP\n')] |
import functools
import os
from fase_lib.base_util import singleton_util
# Placeholder inside resource filename templates; replaced by a pixel-density
# tag formatted as '%.2f' with '.' turned into '_' (e.g. 1.5 -> '1_50').
TEMPLATE_SYMBOL = '@'
# Candidate pixel densities are probed in 0.25 steps within (MIN, MAX).
PIXEL_DENSITY_STEP = 0.25
PIXEL_DENSITY_MIN = 0
PIXEL_DENSITY_MAX = 10
@singleton_util.Singleton()
class ResourceManager():
  """Process-wide resolver of resource filenames.

  Filenames containing TEMPLATE_SYMBOL are treated as density templates and
  resolved to the nearest existing file; plain filenames are checked for
  existence.  Lookups are memoized.
  """

  def __init__(self, resource_dir):
    self.resource_dir = resource_dir

  def GetResourceDir(self):
    return self.resource_dir

  @functools.lru_cache(maxsize=None, typed=True)
  def GetResourceFilename(self, filename, pixel_density):
    """Return an existing resource filename, or None if nothing matches."""
    if TEMPLATE_SYMBOL in filename:
      return self.ResolveResourceFilename(filename, pixel_density)
    candidate = os.path.join(self.resource_dir, filename)
    return filename if os.path.isfile(candidate) else None

  def ResolveResourceFilename(self, filename_template, pixel_density):
    """Substitute densities into the template, nearest-first, downward then
    upward, and return the first name that exists on disk (else None)."""
    assert pixel_density % PIXEL_DENSITY_STEP == 0
    for direction in (-1, 1):
      density = pixel_density
      while ((direction == -1 and density > PIXEL_DENSITY_MIN) or
             (direction == 1 and density < PIXEL_DENSITY_MAX)):
        density_tag = ('%.2f' % density).replace('.', '_')
        candidate = filename_template.replace(TEMPLATE_SYMBOL, density_tag)
        if os.path.isfile(os.path.join(self.resource_dir, candidate)):
          return candidate
        density += PIXEL_DENSITY_STEP * direction
    return None
| [
"os.path.join",
"functools.lru_cache",
"fase_lib.base_util.singleton_util.Singleton"
] | [((171, 197), 'fase_lib.base_util.singleton_util.Singleton', 'singleton_util.Singleton', ([], {}), '()\n', (195, 197), False, 'from fase_lib.base_util import singleton_util\n'), ((359, 404), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': 'None', 'typed': '(True)'}), '(maxsize=None, typed=True)\n', (378, 404), False, 'import functools\n'), ((588, 629), 'os.path.join', 'os.path.join', (['self.resource_dir', 'filename'], {}), '(self.resource_dir, filename)\n', (600, 629), False, 'import os\n'), ((1229, 1270), 'os.path.join', 'os.path.join', (['self.resource_dir', 'filename'], {}), '(self.resource_dir, filename)\n', (1241, 1270), False, 'import os\n')] |
from IA.model import model
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import logging
import json
# Load the bot token; a context manager ensures the file handle is closed
# (the original `json.load(open(...))` leaked it).
with open('telegram_bot/token.json') as _token_file:
    TOKEN = json.load(_token_file)['token']

updater = Updater(token=TOKEN, use_context=True)
dispatcher = updater.dispatcher

logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)
def start(update, context):
    """Greet the user in response to the /start command."""
    chat = update.effective_chat.id
    greeting = 'Olá, sou o iKnox. Fique a vontade para perguntar.'
    context.bot.send_message(chat_id=chat, text=greeting)
def reply(update, context):
    """Answer any incoming text message with the model's prediction."""
    answer = model.predict(update.message.text)
    context.bot.send_message(chat_id=update.effective_chat.id, text=answer)
# Register the /start command and the catch-all text handler, then begin
# long-polling Telegram for updates.
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)

reply_handler = MessageHandler(Filters.text, reply)
dispatcher.add_handler(reply_handler)

updater.start_polling()
"logging.basicConfig",
"IA.model.model.predict",
"telegram.ext.MessageHandler",
"telegram.ext.CommandHandler",
"telegram.ext.Updater"
] | [((200, 238), 'telegram.ext.Updater', 'Updater', ([], {'token': 'TOKEN', 'use_context': '(True)'}), '(token=TOKEN, use_context=True)\n', (207, 238), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n'), ((272, 379), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n", (291, 379), False, 'import logging\n'), ((791, 821), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""start"""', 'start'], {}), "('start', start)\n", (805, 821), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n'), ((877, 912), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'reply'], {}), '(Filters.text, reply)\n', (891, 912), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n'), ((652, 675), 'IA.model.model.predict', 'model.predict', (['question'], {}), '(question)\n', (665, 675), False, 'from IA.model import model\n')] |
"""Tabular iterators."""
from typing import Optional
from typing import Tuple
from typing import Union
from typing import cast
import cupy as cp
from lightautoml.dataset.gpu.gpu_dataset import CupyDataset
from lightautoml.dataset.gpu.gpu_dataset import CudfDataset
from lightautoml.dataset.gpu.gpu_dataset import DaskCudfDataset
from lightautoml.validation.base import CustomIdxs
from lightautoml.validation.base import CustomIterator
from lightautoml.validation.base import DummyIterator
from lightautoml.validation.base import HoldoutIterator
from lightautoml.validation.base import TrainValidIterator
GpuDataset = Union[CupyDataset, CudfDataset, DaskCudfDataset]
class HoldoutIterator_gpu(HoldoutIterator):
    """Holdout iterator over a predefined train/valid pair (GPU datasets)."""

    def __init__(self, train: GpuDataset, valid: GpuDataset):
        """Store the predefined split.

        Args:
            train: Dataset of train data.
            valid: Dataset of valid data.

        """
        self.train = train
        self.valid = valid

    def __len__(self) -> Optional[int]:
        """A holdout split has exactly one fold."""
        return 1

    def __iter__(self) -> 'HoldoutIterator_gpu':
        """Yield the single (idx, train, valid) triple."""
        return iter(((None, self.train, self.valid),))

    def __getitem__(self, number):
        """Index access; only item 0 exists."""
        if number >= 1:
            raise IndexError('index out of range')

        return None, self.train, self.valid
class FoldsIterator_gpu(TrainValidIterator):
    """Classic cv iterator (GPU version).

    Folds should be defined in Reader, based on cross validation method.
    """

    def __init__(self, train: GpuDataset, n_folds: Optional[int] = None):
        """Creates iterator (GPU version).

        Args:
            train: Dataset for folding.
            n_folds: Number of folds.

        """
        assert hasattr(train, 'folds'), 'Folds in dataset should be defined to make folds iterator.'

        self.train = train
        max_folds = train.folds.max()
        # DaskCudf keeps the max lazy; force computation to get a scalar.
        if type(train) == DaskCudfDataset:
            max_folds = max_folds.compute()
        self.n_folds = max_folds + 1
        if n_folds is not None:
            self.n_folds = min(self.n_folds, n_folds)

    def _split_fold(self, fold: int) -> Tuple[cp.ndarray, GpuDataset, GpuDataset]:
        """Split the train dataset on fold ``fold``.

        Single implementation of the index arithmetic that was previously
        duplicated verbatim in ``__getitem__``, ``__next__`` and
        ``convert_to_holdout_iterator``.

        Returns:
            Validation indexes, train part, validation part.

        """
        val_mask = (self.train.folds == fold)
        # Normalize the boolean mask to an array cupy can index with.
        if type(self.train) == CudfDataset:
            val_mask = val_mask.values
        elif type(self.train) == DaskCudfDataset:
            val_mask = val_mask.compute().values
        tr_mask = cp.logical_not(val_mask)
        idx = cp.arange(self.train.shape[0])
        tr_idx, val_idx = idx[tr_mask], idx[val_mask]
        # DaskCudf datasets are indexed with host arrays.
        if type(self.train) == DaskCudfDataset:
            tr_idx = tr_idx.get()
            val_idx = val_idx.get()
        return val_idx, self.train[tr_idx], self.train[val_idx]

    def __len__(self) -> int:
        """Get len of iterator.

        Returns:
            Number of folds.

        """
        return self.n_folds

    def __iter__(self) -> 'FoldsIterator_gpu':
        """Set counter to 0 and return self.

        Returns:
            Iterator for folds.

        """
        self._curr_idx = 0
        return self

    def __getitem__(self, number):
        """Return the split for fold ``number``."""
        if number >= self.n_folds:
            raise IndexError('index out of range')
        val_idx, train, valid = self._split_fold(number)
        return val_idx, cast(GpuDataset, train), cast(GpuDataset, valid)

    def __next__(self) -> Tuple[cp.ndarray, GpuDataset, GpuDataset]:
        """Define how to get next object.

        Returns:
            Mask for current fold, train dataset, validation dataset.

        """
        if self._curr_idx == self.n_folds:
            raise StopIteration
        val_idx, train, valid = self._split_fold(self._curr_idx)
        self._curr_idx += 1
        return val_idx, train, valid

    def get_validation_data(self) -> GpuDataset:
        """Just return train dataset.

        Returns:
            Whole train dataset.

        """
        return self.train

    def convert_to_holdout_iterator(self) -> HoldoutIterator_gpu:
        """Convert iterator to hold-out-iterator.

        Fold 0 is used for validation, everything else is used for training.

        Returns:
            new hold-out-iterator.

        """
        _, train, valid = self._split_fold(0)
        return HoldoutIterator_gpu(train, valid)
def get_gpu_iterator(
        train: GpuDataset,
        valid: Optional[GpuDataset] = None,
        n_folds: Optional[int] = None,
        iterator: Optional[CustomIdxs] = None
) -> Union[FoldsIterator_gpu, HoldoutIterator_gpu, HoldoutIterator, CustomIterator, DummyIterator]:
    """Get iterator for gpu dataset.

    Priority of the arguments: an explicit ``valid`` wins, then a custom
    ``iterator``, then the dataset's own folds (limited by ``n_folds``);
    with none of those a ``DummyIterator`` (train, train) is built.

    Args:
        train: ``LAMLDataset`` to train.
        valid: Optional ``LAMLDataset`` for validate.
        n_folds: maximum number of folds to iterate.
          If ``None`` - iterate through all folds.
        iterator: Takes dataset as input and return an iterator
          of indexes of train/valid for train dataset.

    Returns:
        new train-validation iterator.

    """
    if valid is not None:
        return HoldoutIterator(train, valid)
    if iterator is not None:
        return CustomIterator(train, iterator)
    if train.folds is not None:
        return FoldsIterator_gpu(train, n_folds)
    return DummyIterator(train)
| [
"lightautoml.validation.base.DummyIterator",
"cupy.arange",
"lightautoml.validation.base.CustomIterator",
"lightautoml.validation.base.HoldoutIterator",
"cupy.logical_not",
"typing.cast"
] | [((3087, 3110), 'cupy.logical_not', 'cp.logical_not', (['val_idx'], {}), '(val_idx)\n', (3101, 3110), True, 'import cupy as cp\n'), ((3125, 3155), 'cupy.arange', 'cp.arange', (['self.train.shape[0]'], {}), '(self.train.shape[0])\n', (3134, 3155), True, 'import cupy as cp\n'), ((4011, 4034), 'cupy.logical_not', 'cp.logical_not', (['val_idx'], {}), '(val_idx)\n', (4025, 4034), True, 'import cupy as cp\n'), ((4049, 4079), 'cupy.arange', 'cp.arange', (['self.train.shape[0]'], {}), '(self.train.shape[0])\n', (4058, 4079), True, 'import cupy as cp\n'), ((5056, 5079), 'cupy.logical_not', 'cp.logical_not', (['val_idx'], {}), '(val_idx)\n', (5070, 5079), True, 'import cupy as cp\n'), ((5094, 5124), 'cupy.arange', 'cp.arange', (['self.train.shape[0]'], {}), '(self.train.shape[0])\n', (5103, 5124), True, 'import cupy as cp\n'), ((6391, 6420), 'lightautoml.validation.base.HoldoutIterator', 'HoldoutIterator', (['train', 'valid'], {}), '(train, valid)\n', (6406, 6420), False, 'from lightautoml.validation.base import HoldoutIterator\n'), ((3415, 3438), 'typing.cast', 'cast', (['GpuDataset', 'train'], {}), '(GpuDataset, train)\n', (3419, 3438), False, 'from typing import cast\n'), ((3440, 3463), 'typing.cast', 'cast', (['GpuDataset', 'valid'], {}), '(GpuDataset, valid)\n', (3444, 3463), False, 'from typing import cast\n'), ((6474, 6505), 'lightautoml.validation.base.CustomIterator', 'CustomIterator', (['train', 'iterator'], {}), '(train, iterator)\n', (6488, 6505), False, 'from lightautoml.validation.base import CustomIterator\n'), ((6628, 6648), 'lightautoml.validation.base.DummyIterator', 'DummyIterator', (['train'], {}), '(train)\n', (6641, 6648), False, 'from lightautoml.validation.base import DummyIterator\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from nltk.tokenize import TweetTokenizer
from nltk.stem.snowball import SnowballStemmer
import numpy as np
from collections import defaultdict,Counter
import logging as logger
from nortok.stopwords import get_norwegian_stopwords
import pickle
import gzip
def dd_def():
    """Default factory for ``defaultdict`` that always yields 0.

    Defined at module level (rather than as a lambda) so objects that
    reference it can be located by name, e.g. when unpickling.
    """
    return 0
def _fit_tokenizer(texts, tokfunc, max_length=None):
    """Build word/index dictionaries from ``texts``.

    Index 0 is reserved (never assigned to a token); tokens get indices
    1..N in first-seen order.  Returns ``(word2ind, ind2word, wordcount)``.
    """
    counts = Counter()
    index_to_word = {}
    word_to_index = defaultdict(dd_def)
    next_index = 0
    for document in texts:
        tokens = tokfunc(document, max_length=max_length)
        counts.update(tokens)
        for token in set(tokens):
            if token not in word_to_index:
                next_index += 1
                word_to_index[token] = next_index
                index_to_word[next_index] = token
    return word_to_index, index_to_word, counts
def _max_word_vocab(word2ind,ind2word,wcs,max_words=None):
if max_words is None:
return word2ind,ind2word
if len(word2ind)<max_words:
logger.info('len(word2ind)<=max_words:{0}'.format(max_words))
return word2ind,ind2word
w2i=defaultdict(dd_def)
i2w={}
wordInd=1
for w in wcs:
word=w[0]
if wordInd>=max_words:
break
w2i[word]=wordInd
i2w[wordInd]=word
wordInd+=1
return w2i,i2w
def _texts_to_seqs(texts,tokfunc,word2ind,max_len,n_texts=None):
if n_texts is None:
n_texts=len(texts)
seqs=np.zeros((n_texts,max_len),dtype='int32')
for iii,txt in enumerate(texts):
toks=tokfunc(txt,max_len)
ml=min(max_len,len(toks))
seqs[iii,:ml]=[word2ind[tok] for tok in toks]
return seqs
def texts_to_seqs_var(texts, tokfunc, word2ind, max_len=None):
    """Lazily map each text to a variable-length list of token indices."""
    for document in texts:
        yield [word2ind[token] for token in tokfunc(document, max_length=max_len)]
class BaseTokenizer(object):
    """Whitespace tokenizer with a fit/transform-style vocabulary API.

    Index 0 is reserved for padding; real tokens map to 1..N (see
    ``_fit_tokenizer``).  Subclasses override ``tokenize`` to change how
    text is split.
    """
    def __init__(self,word2ind=None,max_words=None,min_frequency=2,**kwargs):
        # NOTE(review): when word2ind is None, self.word2ind is left unset
        # until fit_tokenizer() runs, and max_words is accepted but never
        # stored here -- confirm whether both are intentional.
        if word2ind is not None:
            self.document_count=1
            self.word2ind=defaultdict(dd_def,word2ind)
        self.min_frequency=min_frequency
    def tokenize(self,text,max_length=None):
        """Split ``text`` on whitespace; a truthy ``max_length`` truncates
        to that many tokens."""
        toks=text.split()
        if max_length:
            toks=toks[:max_length]
        return toks
    def texts_to_sequences(self,texts,max_len,n_texts=None):
        """Return a fixed-width (n_texts, max_len) int32 array of token
        indices, zero-padded on the right."""
        seqs=_texts_to_seqs(texts,self.tokenize,self.word2ind,max_len,n_texts)
        return seqs
    def var_length_texts_to_sequences(self,texts):
        """Lazily yield one variable-length index sequence per text."""
        return texts_to_seqs_var(texts,self.tokenize,self.word2ind)
    def fit_tokenizer(self,texts,max_length,max_words=None):
        """Build the vocabulary from ``texts``: count tokens, drop those
        seen fewer than ``min_frequency`` times, sort by frequency, and
        cap the vocabulary at ``max_words``."""
        word2ind,ind2word,wordcount=_fit_tokenizer(texts,self.tokenize,max_length=max_length)
        wordcount=dict((q,r) for q,r in wordcount.items() if r>=self.min_frequency)
        def skey(x):
            # Sort key: the token's frequency count.
            return x[1]
        wcs=list(wordcount.items())
        wcs.sort(key=skey,reverse=True)
        self.wordcount=wcs
        self.word2ind,self.ind2word=_max_word_vocab(word2ind,ind2word,self.wordcount,max_words)
        self.max_words=max_words
    def prune_vocab(self,max_words=None):
        """Shrink the vocabulary to the ``max_words`` most frequent tokens.

        NOTE(review): the guard also rejects an equal-sized vocabulary, and
        raises TypeError when the tokenizer was fitted with
        max_words=None -- confirm both behaviours are wanted.
        """
        if max_words>=self.max_words:
            raise ValueError("Can't prune with larger vocabulary.")
        self.word2ind,self.ind2word=_max_word_vocab(self.word2ind,self.ind2word,self.wordcount,max_words)
        self.max_words=max_words
    def save_tokenizer(self,savepath,extraobjs=None):
        """Pickle the vocabulary (plus any ``extraobjs``) gzip-compressed.

        Dicts are serialized as item lists, so the pickle does not depend
        on the defaultdict factory.
        """
        w2i=list(self.word2ind.items())
        i2w=list(self.ind2word.items())
        outdict={'word2ind':w2i,'ind2word':i2w,'max_words':self.max_words}
        if extraobjs:
            outdict.update(extraobjs)
        with gzip.open(savepath,'wb') as ff:
            pickle.dump(outdict,ff)
    @staticmethod
    def load_tokenizer(savepath,initClass=None):
        """Rebuild a tokenizer of class ``initClass`` from data written by
        ``save_tokenizer``, restoring every stored key via setattr.

        NOTE(review): the whole loaded dict is passed as initClass's first
        positional argument (its ``word2ind`` slot) before the attributes
        are overwritten below -- confirm this construction path.
        """
        with gzip.open(savepath,'rb') as ff:
            indict=pickle.load(ff)
        indict['word2ind']=defaultdict(dd_def,indict['word2ind'])
        indict['ind2word']=dict(indict['ind2word'])
        tok=initClass(indict)
        for k,v in indict.items():
            setattr(tok,k,v)
        return tok
class WordTokenizer(BaseTokenizer):
    """Tokenizer built on NLTK's ``TweetTokenizer`` with optional
    Norwegian stopword removal and Snowball stemming."""
    def __init__(self,word2ind=None,use_stopwords=False,use_stemmer=False,max_words=None,**kwargs):
        """
        :param word2ind: optional pre-built token -> index mapping.
        :param use_stopwords: pass a ``set`` of stopwords, or any other
            truthy value for the bundled Norwegian stopword list; falsy
            disables filtering.
        :param use_stemmer: truthy enables the Norwegian Snowball stemmer.
        :param kwargs: forwarded to both BaseTokenizer and TweetTokenizer.
        """
        self.strip_handles=False
        # FIX: forward word2ind to the base class; it was previously
        # accepted but silently dropped.
        super(WordTokenizer, self).__init__(word2ind=word2ind, **kwargs)
        # NOTE(review): the same kwargs go to TweetTokenizer, so any
        # BaseTokenizer-only keyword (e.g. min_frequency) would raise a
        # TypeError here -- confirm intended usage.
        self.tweetok=TweetTokenizer(**kwargs)
        if use_stopwords:
            if isinstance(use_stopwords,set):
                self.stopwords=use_stopwords
            else:
                self.stopwords=get_norwegian_stopwords()
        else:
            self.stopwords=False
        self.use_stemmer=use_stemmer
        # FIX: create the stemmer for any truthy value.  The previous
        # ``use_stemmer==True`` test left self.stemmer undefined for
        # truthy non-bool values while tokenize() still took the stemming
        # branch, raising AttributeError at tokenize time.
        if use_stemmer:
            self.stemmer=SnowballStemmer('norwegian')
    def def_eobjs(self):
        """Extra objects persisted alongside the vocabulary."""
        return {'use_stemmer':self.use_stemmer,'stopwords':self.stopwords}
    def tokenize(self,text,max_length=None):
        """Tweet-tokenize ``text``, truncate to ``max_length`` (if truthy),
        then apply stopword filtering and/or stemming as configured."""
        toks=self.tweetok.tokenize(text)
        if max_length:
            toks=toks[:max_length]
        if self.stopwords and (not self.use_stemmer):
            toks=[t for t in toks if t not in self.stopwords]
        elif self.stopwords and self.use_stemmer:
            toks=[self.stemmer.stem(t) for t in toks if t not in self.stopwords]
        elif (not self.stopwords) and self.use_stemmer:
            toks=[self.stemmer.stem(t) for t in toks]
        return toks
    def save_tokenizer(self,savepath):
        """Persist the vocabulary plus the stemmer/stopword configuration."""
        eobjs=self.def_eobjs()
        super(WordTokenizer, self).save_tokenizer(savepath=savepath,extraobjs=eobjs)
class RawCharTokenizer(BaseTokenizer):
    """Character-level tokenizer: lower-cases the text and emits one
    token per character."""
    def __init__(self, word2ind=None, max_words=None):
        self.max_words = max_words
    def tokenize(self, text, max_length=None):
        """Return the characters of ``text.lower()``; a truthy
        ``max_length`` truncates to that many characters."""
        characters = list(text.lower())
        return characters[:max_length] if max_length else characters
class HierarchicalTokenizer(BaseTokenizer):
    """Two-level tokenizer: splits text into words, then each word into
    characters, yielding a list of character-token lists."""
    def __init__(self,word2ind=None,max_words=None):
        self.max_words=max_words
        self.wordtok=WordTokenizer()
        self.chartok=RawCharTokenizer(max_words=max_words)
    def tokenize(self,text,max_len_words=512,max_len_chars=20):
        """Return one character-token list per word of ``text``.

        :param max_len_words: cap on the number of words considered.
        :param max_len_chars: cap on characters kept per word.
        """
        wds=self.wordtok.tokenize(text,max_length=max_len_words)
        # FIX: the original built a one-element list from an undefined
        # name ``wd`` (NameError at runtime); tokenize every word that
        # the word tokenizer produced.
        return [self.chartok.tokenize(wd,max_length=max_len_chars) for wd in wds]
| [
"nltk.tokenize.TweetTokenizer",
"pickle.dump",
"gzip.open",
"pickle.load",
"collections.Counter",
"nltk.stem.snowball.SnowballStemmer",
"numpy.zeros",
"collections.defaultdict",
"nortok.stopwords.get_norwegian_stopwords"
] | [((494, 503), 'collections.Counter', 'Counter', ([], {}), '()\n', (501, 503), False, 'from collections import defaultdict, Counter\n'), ((533, 552), 'collections.defaultdict', 'defaultdict', (['dd_def'], {}), '(dd_def)\n', (544, 552), False, 'from collections import defaultdict, Counter\n'), ((1159, 1178), 'collections.defaultdict', 'defaultdict', (['dd_def'], {}), '(dd_def)\n', (1170, 1178), False, 'from collections import defaultdict, Counter\n'), ((1506, 1549), 'numpy.zeros', 'np.zeros', (['(n_texts, max_len)'], {'dtype': '"""int32"""'}), "((n_texts, max_len), dtype='int32')\n", (1514, 1549), True, 'import numpy as np\n'), ((3933, 3972), 'collections.defaultdict', 'defaultdict', (['dd_def', "indict['word2ind']"], {}), "(dd_def, indict['word2ind'])\n", (3944, 3972), False, 'from collections import defaultdict, Counter\n'), ((4382, 4406), 'nltk.tokenize.TweetTokenizer', 'TweetTokenizer', ([], {}), '(**kwargs)\n', (4396, 4406), False, 'from nltk.tokenize import TweetTokenizer\n'), ((2098, 2127), 'collections.defaultdict', 'defaultdict', (['dd_def', 'word2ind'], {}), '(dd_def, word2ind)\n', (2109, 2127), False, 'from collections import defaultdict, Counter\n'), ((3690, 3715), 'gzip.open', 'gzip.open', (['savepath', '"""wb"""'], {}), "(savepath, 'wb')\n", (3699, 3715), False, 'import gzip\n'), ((3734, 3758), 'pickle.dump', 'pickle.dump', (['outdict', 'ff'], {}), '(outdict, ff)\n', (3745, 3758), False, 'import pickle\n'), ((3839, 3864), 'gzip.open', 'gzip.open', (['savepath', '"""rb"""'], {}), "(savepath, 'rb')\n", (3848, 3864), False, 'import gzip\n'), ((3890, 3905), 'pickle.load', 'pickle.load', (['ff'], {}), '(ff)\n', (3901, 3905), False, 'import pickle\n'), ((4739, 4767), 'nltk.stem.snowball.SnowballStemmer', 'SnowballStemmer', (['"""norwegian"""'], {}), "('norwegian')\n", (4754, 4767), False, 'from nltk.stem.snowball import SnowballStemmer\n'), ((4573, 4598), 'nortok.stopwords.get_norwegian_stopwords', 'get_norwegian_stopwords', ([], {}), '()\n', (4596, 
4598), False, 'from nortok.stopwords import get_norwegian_stopwords\n')] |
from django import template
from django.utils.translation import pgettext
from django.templatetags.static import static
from django.template.defaultfilters import safe, truncatechars
register = template.Library()
@register.filter()
def safe_truncate(string, arg):
    """Template filter: mark ``string`` safe, then truncate it to ``arg``
    characters (appending Django's ellipsis when shortened)."""
    marked = safe(string)
    return truncatechars(marked, arg)
"django.template.defaultfilters.safe",
"django.template.Library"
] | [((195, 213), 'django.template.Library', 'template.Library', ([], {}), '()\n', (211, 213), False, 'from django import template\n'), ((292, 304), 'django.template.defaultfilters.safe', 'safe', (['string'], {}), '(string)\n', (296, 304), False, 'from django.template.defaultfilters import safe, truncatechars\n')] |
import numpy.testing as test
import numpy as np
from unittest import TestCase
from PyFVCOM.ocean import *
class OceanToolsTest(TestCase):
    """Reference-value regression tests for the PyFVCOM.ocean helpers.

    Expected values are mostly the Fofonoff and Millard (1983) check
    values; the required accuracy differs per function (see the
    ``decimal=`` arguments below, some of which are deliberately loose).
    """
    def setUp(self):
        """ Make a set of data for the various ocean tools functions """
        self.lat = 30
        self.z = np.array(9712.02)
        self.t = np.array(40)
        self.s = np.array(40)
        self.p = np.array(10000)
        self.pr = np.array(0)
        self.c = np.array(1.888091)
        self.td = np.array(20)  # for dens_jackett
        self.sd = np.array(20)  # for dens_jackett
        self.pd = np.array(1000)  # for dens_jackett
        self.cond = np.array(53000)  # for cond2salt
        self.h = np.array((10, 20, 30, 100))  # depths for stokes
        self.U = 0.25  # U for stokes and dissipation
        self.omega = 1 / 44714.1647021416  # omega for stokes
        self.z0 = np.array((0.0025))  # z0 for stokes
        self.rho = 1025
        self.temp = np.arange(-20, 50, 10)
        self.dew = np.linspace(0, 20, len(self.temp))
    # Use some of the Fofonoff and Millard (1983) checks.
    def test_sw_svan(self):
        """ Specific volume anomaly """
        test_svan = 9.8130210e-6
        res_svan = sw_svan(self.t, self.s, self.p)
        test.assert_almost_equal(res_svan, test_svan, decimal=1)
    def test_res_z(self):
        """ Pressure to depth """
        test_z = 9712.02
        res_z = pressure2depth(self.p, self.lat)
        # Hmmm, not very accurate!
        test.assert_almost_equal(res_z, test_z, decimal=-1)
    # The return to depth is a bit inaccurate, not sure why.
    def test_depth2pressure(self):
        """ Depth to pressure """
        test_p = 9712.653
        res_pres = depth2pressure(self.z, self.lat)
        # Hmmm, horribly inaccurate!
        test.assert_almost_equal(res_pres, test_p, decimal=-4)
    def test_cp_sw(self):
        """ Specific heat of seawater """
        test_cp = 3849.5
        res_cp = cp_sw(self.t, self.s, self.p)
        test.assert_almost_equal(res_cp, test_cp, decimal=1)
    def test_dT_adiab_sw(self):
        """ Adiabatic temperature gradient """
        test_atg = 0.0003255976
        res_atg = dT_adiab_sw(self.t, self.s, self.p)
        test.assert_almost_equal(res_atg, test_atg, decimal=6)
    def test_theta_sw(self):
        """ Potential temperature for sea water """
        test_theta = 36.89073
        res_theta = theta_sw(self.t, self.s, self.p, self.pr)
        test.assert_almost_equal(res_theta, test_theta, decimal=2)
    def test_sw_sal78(self):
        """ Salinity from conductivity, temperature and pressure (sw_sal78) """
        test_salinity = 40
        res_sal78 = sw_sal78(self.c, self.t, self.p)
        test.assert_almost_equal(res_sal78, test_salinity, decimal=5)
    def test_dens_jackett(self):
        """ Density from temperature, salinity and pressure """
        # NOTE(review): exact float equality -- brittle if the
        # implementation's arithmetic ever changes; confirm intended.
        test_dens = 1017.728868019642
        res_dens = dens_jackett(self.td, self.sd, self.pd)
        test.assert_equal(res_dens, test_dens)
    def test_cond2salt(self):
        """ Conductivity to salinity """
        test_salt = 34.935173507811783
        res_salt = cond2salt(self.cond)
        test.assert_equal(res_salt, test_salt)
    # Disabled: expected values for stokes() were never established
    # (all NaN placeholders).
    # def test_stokes(self):
    #     """ Stokes number """
    #     test_stokes, test_u_star, test_delta = np.nan, np.nan, np.nan
    #     res_stokes, res_u_star, res_delta = stokes(self.h, self.U, self.omega, self.z0, U_star=True, delta=True)
    #     test.assert_equal(res_stokes, test_stokes)
    #     test.assert_equal(res_u_star, test_u_star)
    #     test.assert_equal(res_delta, test_delta)
    def test_dissipation(self):
        """ Tidal dissipation for a given tidal harmonic """
        test_dissipation = 0.0400390625
        res_dissipation = dissipation(self.rho, self.U)
        test.assert_equal(res_dissipation, test_dissipation)
    def test_rhum(self):
        """ Relative humidity from dew temperature and air temperature """
        test_rhum = np.array((487.36529085, 270.83391406, 160.16590946, 100.0, 65.47545095, 44.70251971, 31.67003471))
        res_rhum = rhum(self.dew, self.temp)
        test.assert_almost_equal(res_rhum, test_rhum)
| [
"numpy.array",
"numpy.testing.assert_equal",
"numpy.arange",
"numpy.testing.assert_almost_equal"
] | [((275, 292), 'numpy.array', 'np.array', (['(9712.02)'], {}), '(9712.02)\n', (283, 292), True, 'import numpy as np\n'), ((310, 322), 'numpy.array', 'np.array', (['(40)'], {}), '(40)\n', (318, 322), True, 'import numpy as np\n'), ((340, 352), 'numpy.array', 'np.array', (['(40)'], {}), '(40)\n', (348, 352), True, 'import numpy as np\n'), ((370, 385), 'numpy.array', 'np.array', (['(10000)'], {}), '(10000)\n', (378, 385), True, 'import numpy as np\n'), ((404, 415), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (412, 415), True, 'import numpy as np\n'), ((433, 451), 'numpy.array', 'np.array', (['(1.888091)'], {}), '(1.888091)\n', (441, 451), True, 'import numpy as np\n'), ((470, 482), 'numpy.array', 'np.array', (['(20)'], {}), '(20)\n', (478, 482), True, 'import numpy as np\n'), ((521, 533), 'numpy.array', 'np.array', (['(20)'], {}), '(20)\n', (529, 533), True, 'import numpy as np\n'), ((572, 586), 'numpy.array', 'np.array', (['(1000)'], {}), '(1000)\n', (580, 586), True, 'import numpy as np\n'), ((627, 642), 'numpy.array', 'np.array', (['(53000)'], {}), '(53000)\n', (635, 642), True, 'import numpy as np\n'), ((677, 704), 'numpy.array', 'np.array', (['(10, 20, 30, 100)'], {}), '((10, 20, 30, 100))\n', (685, 704), True, 'import numpy as np\n'), ((860, 876), 'numpy.array', 'np.array', (['(0.0025)'], {}), '(0.0025)\n', (868, 876), True, 'import numpy as np\n'), ((940, 962), 'numpy.arange', 'np.arange', (['(-20)', '(50)', '(10)'], {}), '(-20, 50, 10)\n', (949, 962), True, 'import numpy as np\n'), ((1236, 1292), 'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_svan', 'test_svan'], {'decimal': '(1)'}), '(res_svan, test_svan, decimal=1)\n', (1260, 1292), True, 'import numpy.testing as test\n'), ((1471, 1522), 'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_z', 'test_z'], {'decimal': '(-1)'}), '(res_z, test_z, decimal=-1)\n', (1495, 1522), True, 'import numpy.testing as test\n'), ((1777, 1831), 
'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_pres', 'test_p'], {'decimal': '(-4)'}), '(res_pres, test_p, decimal=-4)\n', (1801, 1831), True, 'import numpy.testing as test\n'), ((1981, 2033), 'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_cp', 'test_cp'], {'decimal': '(1)'}), '(res_cp, test_cp, decimal=1)\n', (2005, 2033), True, 'import numpy.testing as test\n'), ((2208, 2262), 'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_atg', 'test_atg'], {'decimal': '(6)'}), '(res_atg, test_atg, decimal=6)\n', (2232, 2262), True, 'import numpy.testing as test\n'), ((2445, 2503), 'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_theta', 'test_theta'], {'decimal': '(2)'}), '(res_theta, test_theta, decimal=2)\n', (2469, 2503), True, 'import numpy.testing as test\n'), ((2702, 2763), 'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_sal78', 'test_salinity'], {'decimal': '(5)'}), '(res_sal78, test_salinity, decimal=5)\n', (2726, 2763), True, 'import numpy.testing as test\n'), ((2967, 3005), 'numpy.testing.assert_equal', 'test.assert_equal', (['res_dens', 'test_dens'], {}), '(res_dens, test_dens)\n', (2984, 3005), True, 'import numpy.testing as test\n'), ((3165, 3203), 'numpy.testing.assert_equal', 'test.assert_equal', (['res_salt', 'test_salt'], {}), '(res_salt, test_salt)\n', (3182, 3203), True, 'import numpy.testing as test\n'), ((3808, 3860), 'numpy.testing.assert_equal', 'test.assert_equal', (['res_dissipation', 'test_dissipation'], {}), '(res_dissipation, test_dissipation)\n', (3825, 3860), True, 'import numpy.testing as test\n'), ((3982, 4085), 'numpy.array', 'np.array', (['(487.36529085, 270.83391406, 160.16590946, 100.0, 65.47545095, 44.70251971,\n 31.67003471)'], {}), '((487.36529085, 270.83391406, 160.16590946, 100.0, 65.47545095, \n 44.70251971, 31.67003471))\n', (3990, 4085), True, 'import numpy as np\n'), ((4134, 4179), 
'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_rhum', 'test_rhum'], {}), '(res_rhum, test_rhum)\n', (4158, 4179), True, 'import numpy.testing as test\n')] |
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import sys
import argparse
import netaddr
import netifaces
import ConfigParser
import platform
from fabric.api import local
from contrail_provisioning.common.base import ContrailSetup
from contrail_provisioning.compute.network import ComputeNetworkSetup
from contrail_provisioning.common.templates import keepalived_conf_template
(PLATFORM, VERSION, EXTRA) = platform.linux_distribution()
class KeepalivedSetup(ContrailSetup, ComputeNetworkSetup):
    """Renders /etc/keepalived/keepalived.conf for HA virtual IPs (one
    VRRP instance per VIP) and enables/starts the keepalived service."""
    def __init__(self, args_str = None):
        super(KeepalivedSetup, self).__init__()
        self._args = None
        # Fall back to this process's own command line when no argument
        # string is supplied.
        if not args_str:
            args_str = ' '.join(sys.argv[1:])
        self.parse_args(args_str)
    def parse_args(self, args_str):
        '''
        Eg. setup-vnc-keepalived --self_ip 10.1.5.11 --mgmt_self_ip 172.16.31.10
            --self_index 1 --internal_vip 10.1.5.13 --external_vip 172.16.58.3
        '''
        parser = self._parse_args(args_str)
        parser.add_argument("--role", help = "Role of the node")
        parser.add_argument("--self_ip", help = "IP Address of this system")
        parser.add_argument("--mgmt_self_ip", help = "Management IP Address of this system")
        parser.add_argument("--internal_vip", help = "Internal(private) Virtual IP Addresses of HA nodes"),
        parser.add_argument("--external_vip", help = "External(public) Virtual IP Addresses of HA nodes"),
        parser.add_argument("--self_index", help = "The index of this HA node", type=int)
        parser.add_argument("--num_nodes", help = "Number of available HA node")
        parser.add_argument("--internal_virtual_router_id", help = "Internal Virtual router ID", type=int)
        parser.add_argument("--external_virtual_router_id", help = "External Virtual router ID", type=int)
        self._args = parser.parse_args(self.remaining_argv)
    def fixup_config_files(self):
        """Render one VRRP instance per configured VIP (internal, plus
        external when given) into keepalived.conf and install it."""
        vip_for_ips = [(self._args.internal_vip, self._args.self_ip, 'INTERNAL')]
        internal_device=self.get_device_by_ip(self._args.self_ip)
        if self._args.external_vip:
            vip_for_ips.append((self._args.external_vip, self._args.mgmt_self_ip, 'EXTERNAL'))
            ext_device=self.get_device_by_ip(self._args.mgmt_self_ip)
        else:
            ext_device=internal_device
        for vip, ip, vip_name in vip_for_ips:
            # keepalived.conf
            # Work out which interface carries this IP and its prefix length.
            device = self.get_device_by_ip(ip)
            netmask = netifaces.ifaddresses(device)[netifaces.AF_INET][0]['netmask']
            prefix = netaddr.IPNetwork('%s/%s' % (ip, netmask)).prefixlen
            # Default VRRP parameters: every node starts as BACKUP with
            # fast, low-tolerance timers.
            state = 'BACKUP'
            delay = 1
            preempt_delay = 1
            timeout = 1
            rise = 1
            fall = 1
            garp_master_repeat = 3
            garp_master_refresh = 1
            ctrl_data_timeout=3
            ctrl_data_rise=1
            ctrl_data_fall=1
            # The first node is configured as MASTER with slower, more
            # tolerant timers.
            if self._args.self_index == 1:
                state = 'MASTER'
                delay = 5
                preempt_delay = 7
                timeout = 3
                rise = 2
                fall = 2
            if vip_name == 'INTERNAL':
                router_id = self._args.internal_virtual_router_id
                external_device = internal_device
            else:
                router_id = self._args.external_virtual_router_id
                external_device = ext_device
            # Lower node index => higher VRRP priority.
            priority = (100 - self._args.self_index)
            # With more than two nodes, the second node also starts as MASTER.
            if self._args.num_nodes > 2 and self._args.self_index == 2:
                state = 'MASTER'
            vip_str = '_'.join([vip_name] + vip.split('.'))
            template_vals = {'__device__': device,
                             '__router_id__' : router_id,
                             '__state__' : state,
                             '__delay__' : delay,
                             '__garp_master_repeat__' : garp_master_repeat,
                             '__garp_master_refresh__' : garp_master_refresh,
                             '__preempt_delay__' : preempt_delay,
                             '__priority__' : priority,
                             '__virtual_ip__' : vip,
                             '__virtual_ip_mask__' : prefix,
                             '__vip_str__' : vip_str,
                             '__timeout__' : timeout,
                             '__rise__' : rise,
                             '__fall__' : fall,
                             '__cd_timeout__' : ctrl_data_timeout,
                             '__cd_rise__' : ctrl_data_rise,
                             '__cd_fall__' : ctrl_data_fall,
                             '__internal_device__' : internal_device,
                             '__external_device__' : external_device,
                            }
            data = self._template_substitute(keepalived_conf_template.template,
                                             template_vals)
            # Append mode: all VRRP instances accumulate in one file.
            with open(self._temp_dir_name + '/keepalived.conf', 'a+') as fp:
                fp.write(data)
        # Install the fully rendered config after all instances are written.
        local("sudo mv %s/keepalived.conf /etc/keepalived/" %(self._temp_dir_name))
    def run_services(self):
        """Enable keepalived at boot and (re)start it.

        NOTE(review): the Ubuntu branch uses chkconfig/service while the
        non-Ubuntu branch uses systemctl; chkconfig is a RedHat-family
        tool, so these branches look swapped -- confirm against the
        target distributions.
        """
        if PLATFORM.lower() == 'ubuntu':
            local("sudo chkconfig keepalived on && sudo service keepalived restart")
        else:
            local("sudo systemctl enable keepalived && sudo systemctl restart keepalived")
def main(args_str = None):
    """Entry point: provision keepalived using the given argument string
    (or, via KeepalivedSetup, the process command line when None)."""
    setup = KeepalivedSetup(args_str)
    setup.setup()
if __name__ == "__main__":
    main()
| [
"netifaces.ifaddresses",
"platform.linux_distribution",
"fabric.api.local",
"netaddr.IPNetwork"
] | [((460, 489), 'platform.linux_distribution', 'platform.linux_distribution', ([], {}), '()\n', (487, 489), False, 'import platform\n'), ((5097, 5171), 'fabric.api.local', 'local', (["('sudo mv %s/keepalived.conf /etc/keepalived/' % self._temp_dir_name)"], {}), "('sudo mv %s/keepalived.conf /etc/keepalived/' % self._temp_dir_name)\n", (5102, 5171), False, 'from fabric.api import local\n'), ((5255, 5327), 'fabric.api.local', 'local', (['"""sudo chkconfig keepalived on && sudo service keepalived restart"""'], {}), "('sudo chkconfig keepalived on && sudo service keepalived restart')\n", (5260, 5327), False, 'from fabric.api import local\n'), ((5354, 5432), 'fabric.api.local', 'local', (['"""sudo systemctl enable keepalived && sudo systemctl restart keepalived"""'], {}), "('sudo systemctl enable keepalived && sudo systemctl restart keepalived')\n", (5359, 5432), False, 'from fabric.api import local\n'), ((2605, 2647), 'netaddr.IPNetwork', 'netaddr.IPNetwork', (["('%s/%s' % (ip, netmask))"], {}), "('%s/%s' % (ip, netmask))\n", (2622, 2647), False, 'import netaddr\n'), ((2521, 2550), 'netifaces.ifaddresses', 'netifaces.ifaddresses', (['device'], {}), '(device)\n', (2542, 2550), False, 'import netifaces\n')] |
# Generated by Django 3.2.7 on 2021-09-10 17:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relabels the project address fields and makes ``project_details``
    a required TextField.  The temporary ``default=1`` only backfills
    existing rows; ``preserve_default=False`` drops it from the model
    afterwards."""
    dependencies = [
        ('project', '0006_auto_20210910_1323'),
    ]
    operations = [
        migrations.AlterField(
            model_name='project',
            name='project_address1',
            field=models.CharField(max_length=100, verbose_name='Address'),
        ),
        migrations.AlterField(
            model_name='project',
            name='project_address2',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name=' '),
        ),
        migrations.AlterField(
            model_name='project',
            name='project_details',
            field=models.TextField(default=1, verbose_name='Details'),
            preserve_default=False,
        ),
    ]
| [
"django.db.models.TextField",
"django.db.models.CharField"
] | [((347, 403), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Address"""'}), "(max_length=100, verbose_name='Address')\n", (363, 403), False, 'from django.db import migrations, models\n'), ((536, 609), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)', 'verbose_name': '""" """'}), "(blank=True, max_length=100, null=True, verbose_name=' ')\n", (552, 609), False, 'from django.db import migrations, models\n'), ((741, 792), 'django.db.models.TextField', 'models.TextField', ([], {'default': '(1)', 'verbose_name': '"""Details"""'}), "(default=1, verbose_name='Details')\n", (757, 792), False, 'from django.db import migrations, models\n')] |
import json
from typing import List
from .customer import Customer
from .facility import Facility
class InputData:
    """Holds the facilities and customers of one problem instance."""

    def __init__(self, facilities: List[Facility], customers: List[Customer]):
        self.facilities = facilities
        self.customers = customers

    def supply(self, facility_name) -> float:
        """Return the supply of the first facility named ``facility_name``
        (implicitly None when no facility matches)."""
        matches = (fac.supply for fac in self.facilities if fac.name == facility_name)
        return next(matches, None)

    @staticmethod
    def read(fn: str):
        """Parse the JSON instance file ``fn`` into an InputData object."""
        with open(fn) as handle:
            payload = json.load(handle)
        facilities = []
        for record in payload["facilities"]:
            cost_by_customer = {
                entry["customer"]: entry["cost"]
                for entry in record["transportCost"]
            }
            facilities.append(
                Facility(
                    name=record["name"],
                    exists=record.get("exists", False),
                    build_cost=record.get("buildCost"),
                    supply=record["supply"],
                    transport_cost=cost_by_customer,
                )
            )
        customers = [
            Customer(name=record["name"], demand=record["demand"])
            for record in payload["customers"]
        ]
        return InputData(facilities=facilities, customers=customers)
| [
"json.load"
] | [((535, 547), 'json.load', 'json.load', (['f'], {}), '(f)\n', (544, 547), False, 'import json\n')] |
from configs.config_handler import Config
from libs.core import FaceMaskAppEngine as CvEngine
from ui.web_gui import WebGUI as UI
from argparse import ArgumentParser
def main():
    """Build the config, application engine, and web UI, wire them
    together, and run the UI loop."""
    parser = ArgumentParser()
    parser.add_argument('--config', type=str, help='json config file path', default='configs/config.json')
    parsed = parser.parse_args()
    config_path = parsed.config
    print("-_- -_- -_- -_- -_- -_- -_- Running %s -_- -_- -_- -_- -_- -_- -_-" % config_path)
    cfg = Config(path=config_path)
    engine = CvEngine(cfg)
    web_ui = UI(cfg, engine)
    engine.set_ui(web_ui)
    web_ui.start()
if __name__ == "__main__":
    main()
| [
"ui.web_gui.WebGUI",
"libs.core.FaceMaskAppEngine",
"argparse.ArgumentParser",
"configs.config_handler.Config"
] | [((287, 303), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (301, 303), False, 'from argparse import ArgumentParser\n'), ((580, 604), 'configs.config_handler.Config', 'Config', ([], {'path': 'config_path'}), '(path=config_path)\n', (586, 604), False, 'from configs.config_handler import Config\n'), ((619, 632), 'libs.core.FaceMaskAppEngine', 'CvEngine', (['cfg'], {}), '(cfg)\n', (627, 632), True, 'from libs.core import FaceMaskAppEngine as CvEngine\n'), ((642, 657), 'ui.web_gui.WebGUI', 'UI', (['cfg', 'engine'], {}), '(cfg, engine)\n', (644, 657), True, 'from ui.web_gui import WebGUI as UI\n')] |
#!/usr/bin/env python3
import argparse
####################################
### Parse command-line arguments ###
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("EH_filename", type=str)
parser.add_argument("disease_locus_filename", type=str)
args = parser.parse_args()
#####################################

# Build a map from locus coordinates (both hg19, with the "chr" prefix
# stripped, and hg38) to a "gene:disease_threshold" annotation string.
# FIX: both input files are now opened with `with` so they are closed
# deterministically (they were previously never closed).
m = {}
with open(args.disease_locus_filename) as disease_locus_file:
    disease_locus_file.readline()  # skip the header line
    for line in disease_locus_file:
        fields = line.rstrip("\n").split("\t")
        gene = fields[3]
        disease_threshold = fields[4]
        hg19_coords = fields[6].replace("chr", "")
        hg38_coords = fields[7]
        to_add = "{}:{}".format(gene, disease_threshold)
        if hg19_coords:
            m[hg19_coords] = to_add
        if hg38_coords:
            m[hg38_coords] = to_add

# Annotate the ExpansionHunter header columns whose names contain known
# locus coordinates, keeping any "(smaller/larger allele)" suffix at the
# end, then stream the remaining lines through unchanged.
with open(args.EH_filename) as EH_file:
    first_line = EH_file.readline()
    fields = first_line.rstrip("\n").split("\t")
    for i in range(len(fields)):
        for coords in m:
            if coords in fields[i]:
                if " (smaller allele)" in fields[i]:
                    fields[i] = "{}:{} (smaller allele)".format(fields[i].replace(" (smaller allele)", ""), m[coords])
                elif " (larger allele)" in fields[i]:
                    fields[i] = "{}:{} (larger allele)".format(fields[i].replace(" (larger allele)", ""), m[coords])
                else:
                    fields[i] = "{}:{}".format(fields[i], m[coords])
    print("\t".join(fields))
    for line in EH_file:
        print(line, end="")
| [
"argparse.ArgumentParser"
] | [((124, 203), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (147, 203), False, 'import argparse\n')] |
from __future__ import print_function
from src import cli
from os import environ as ENV
# Toggle to run the CLI under cProfile and print the top cumulative-time
# entries afterwards.
PROFILE = False

if not PROFILE:
    cli.main()
else:
    print("PROFILING")
    import cProfile
    import pstats
    cProfile.run("cli.main()", "restats")
    stats = pstats.Stats('restats')
    stats.strip_dirs().sort_stats('cumulative').print_stats(50)
| [
"pstats.Stats",
"cProfile.run",
"src.cli.main"
] | [((162, 199), 'cProfile.run', 'cProfile.run', (['"""cli.main()"""', '"""restats"""'], {}), "('cli.main()', 'restats')\n", (174, 199), False, 'import cProfile\n'), ((227, 250), 'pstats.Stats', 'pstats.Stats', (['"""restats"""'], {}), "('restats')\n", (239, 250), False, 'import pstats\n'), ((322, 332), 'src.cli.main', 'cli.main', ([], {}), '()\n', (330, 332), False, 'from src import cli\n')] |
from base64 import standard_b64decode
import logging
import os
import requests
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError
from urllib3.util.retry import Retry
from sdc.rabbit.exceptions import BadMessageError, RetryableError
from sdc.crypto.decrypter import decrypt as sdc_decrypt
from structlog import wrap_logger
from config import Config
from .secrets import load_secrets
# Shared HTTP session used for all uploads: retry failed calls up to 5
# times with exponential backoff (0.1s factor), mounted for both http
# and https, so transient upstream hiccups do not immediately fail.
session = requests.Session()
retries = Retry(total=5, backoff_factor=0.1)
session.mount('http://', HTTPAdapter(max_retries=retries))
session.mount('https://', HTTPAdapter(max_retries=retries))
class ResponseProcessor:
    """Decrypts queued survey submissions and uploads the contained
    spreadsheet to the RAS collection-instrument service.

    Error contract: payloads that cannot be decrypted raise
    BadMessageError (message is quarantined); transient upstream
    failures raise RetryableError (message is retried).
    """

    def __init__(self,
                 key_purpose_submission,
                 expected_secrets=None,
                 logger=None):
        """
        :param key_purpose_submission: key purpose used to select the
            decryption keys from the secret store.
        :param expected_secrets: names of secrets that must be present;
            defaults to none.  FIX: default changed from a shared
            mutable list (``[]``) to ``None`` to avoid the
            mutable-default-argument pitfall.
        :param logger: optional pre-configured structlog logger.
        """
        if expected_secrets is None:
            expected_secrets = []
        self.key_purpose_submission = key_purpose_submission
        self.secret_store = load_secrets(key_purpose_submission,
                                         expected_secrets)
        self.logger = logger or wrap_logger(logging.getLogger(__name__))

    def process(self, msg, tx_id=None):
        """Decrypt ``msg`` and POST the embedded file to the collection
        instrument upload endpoint.

        :raises BadMessageError: when decryption fails (quarantine).
        :raises RetryableError: when the service cannot be reached.
        """
        try:
            self.logger.info("Received some data")
            decrypted_json = sdc_decrypt(msg,
                                         self.secret_store,
                                         self.key_purpose_submission)
        except Exception as e:
            self.logger.error("Decryption error occurred. "
                              "Quarantining message.",
                              e=str(e))
            raise BadMessageError

        # The decrypted payload carries the upload as a base64 string.
        file_with_extension = decrypted_json.get('filename')
        file = standard_b64decode(decrypted_json.get('file').encode('UTF8'))
        filename = file_with_extension.split('.')[0]
        collex_id = os.getenv('COLLEX_ID')
        files = {'file':
                 (file_with_extension,
                  file,
                  'application/vnd.' +
                  'openxmlformats-officedocument.spreadsheetml.sheet',
                  ),
                 }
        try:
            upload_url = Config.COLLECTION_INSTRUMENT_URL + \
                '/collection-instrument-api/1.0.2/upload/{}/{}'
            url = upload_url.format(collex_id, filename)
            self.logger.info('Posting files to ras',
                             ex_id=collex_id,
                             filename=file_with_extension,
                             url=url)
            res = session.post(url, auth=Config.BASIC_AUTH, files=files)
            self.logger.info("Response", text=res.text)
            self.response_ok(res)
        except ConnectionError:
            self.logger.error("Connection error")
            raise RetryableError

    def response_ok(self, res):
        """Map the upload's HTTP status onto the queue error contract:
        200/201 -> success (return), 400 -> BadMessageError (quarantine),
        anything else -> RetryableError (requeue)."""
        if res.status_code in (200, 201):
            self.logger.info("Returned from service",
                             response="ok")
            return
        elif res.status_code == 400:
            self.logger.info("Returned from service",
                             response="client error")
            self.logger.info("Response details",
                             response_status=res.status_code,
                             response_content=res.content)
            raise BadMessageError
        else:
            self.logger.error("Returned from service",
                              response="service error")
            self.logger.error("Response details",
                              response_status=res.status_code,
                              response_content=res.content)
            raise RetryableError
| [
"logging.getLogger",
"requests.Session",
"os.getenv",
"urllib3.util.retry.Retry",
"sdc.crypto.decrypter.decrypt",
"requests.adapters.HTTPAdapter"
] | [((499, 517), 'requests.Session', 'requests.Session', ([], {}), '()\n', (515, 517), False, 'import requests\n'), ((528, 562), 'urllib3.util.retry.Retry', 'Retry', ([], {'total': '(5)', 'backoff_factor': '(0.1)'}), '(total=5, backoff_factor=0.1)\n', (533, 562), False, 'from urllib3.util.retry import Retry\n'), ((588, 620), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'retries'}), '(max_retries=retries)\n', (599, 620), False, 'from requests.adapters import HTTPAdapter\n'), ((648, 680), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'retries'}), '(max_retries=retries)\n', (659, 680), False, 'from requests.adapters import HTTPAdapter\n'), ((1818, 1840), 'os.getenv', 'os.getenv', (['"""COLLEX_ID"""'], {}), "('COLLEX_ID')\n", (1827, 1840), False, 'import os\n'), ((1237, 1301), 'sdc.crypto.decrypter.decrypt', 'sdc_decrypt', (['msg', 'self.secret_store', 'self.key_purpose_submission'], {}), '(msg, self.secret_store, self.key_purpose_submission)\n', (1248, 1301), True, 'from sdc.crypto.decrypter import decrypt as sdc_decrypt\n'), ((1073, 1100), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1090, 1100), False, 'import logging\n')] |