code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import json
from django.core.management.base import BaseCommand
from SecuriTree.models import Door, Area, AccessRule
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('json_file', type=str)
def handle(self, *args, **options):
with open(options['json_file']) as f:
data_list = json.load(f)
for data in data_list['system_data']['areas']:
data['pk'] = data.pop('id')
data['parent_area'] = Area.objects.filter(pk = data.pop('parent_area')).first()
# Remove key because child areas have parent id. Data redundancy avoidance
# It also gives room for Areas to be nested to any depth (Epiuse System Entity Requirement)
del data['child_area_ids']
Area.objects.get_or_create(pk=data['pk'], defaults=data)
for data in data_list['system_data']['doors']:
data['pk'] = data.pop('id')
data['parent_area'] = Area.objects.get(pk = data.pop('parent_area'))
Door.objects.get_or_create(pk=data['pk'], defaults=data)
for data in data_list['system_data']['access_rules']:
data['pk'] = data.pop('id')
doors = data.pop('doors');
rule = AccessRule.objects.get_or_create(pk=data['pk'], defaults=data)
for door_id in doors:
Door.objects.get(id=door_id).accessrules.add(rule)
| [
"SecuriTree.models.Area.objects.get_or_create",
"SecuriTree.models.AccessRule.objects.get_or_create",
"SecuriTree.models.Door.objects.get_or_create",
"json.load",
"SecuriTree.models.Door.objects.get"
] | [((346, 358), 'json.load', 'json.load', (['f'], {}), '(f)\n', (355, 358), False, 'import json\n'), ((792, 848), 'SecuriTree.models.Area.objects.get_or_create', 'Area.objects.get_or_create', ([], {'pk': "data['pk']", 'defaults': 'data'}), "(pk=data['pk'], defaults=data)\n", (818, 848), False, 'from SecuriTree.models import Door, Area, AccessRule\n'), ((1038, 1094), 'SecuriTree.models.Door.objects.get_or_create', 'Door.objects.get_or_create', ([], {'pk': "data['pk']", 'defaults': 'data'}), "(pk=data['pk'], defaults=data)\n", (1064, 1094), False, 'from SecuriTree.models import Door, Area, AccessRule\n'), ((1256, 1318), 'SecuriTree.models.AccessRule.objects.get_or_create', 'AccessRule.objects.get_or_create', ([], {'pk': "data['pk']", 'defaults': 'data'}), "(pk=data['pk'], defaults=data)\n", (1288, 1318), False, 'from SecuriTree.models import Door, Area, AccessRule\n'), ((1370, 1398), 'SecuriTree.models.Door.objects.get', 'Door.objects.get', ([], {'id': 'door_id'}), '(id=door_id)\n', (1386, 1398), False, 'from SecuriTree.models import Door, Area, AccessRule\n')] |
from pstraw import Straw
from dataclasses import dataclass
from datetime import datetime
import os
case_dir = os.path.dirname(os.path.abspath(__file__))
# 实例化straw对象
db = Straw(
DB_DRIVER='mysql', # 必须输入
DB_DATABASE='straw_test', # 必须输入
DB_USER='root', # 必须输入3306, # mysql默认值:3306 , postgres默认值:5432
DB_PASSWORD='<PASSWORD>', # 必须输入
DB_HOST='127.0.0.1', # 默认值:localhost
DB_PORT='8071', # mysql默认值:3306 , postgres默认值:5432
SQL_PATH=os.path.join(case_dir,'.'),# 指定sql路径是当前目录,默认是./sql文件夹
)
'''
创建一个结构体,用于保存数据对象
'''
@dataclass
class USER():
ID: int
NAME: str
USERDESC: str
'''
@db.sql注解:绑定一个sql方法
-- SQL_TEMPLATE_TYPE = 1, 通过python字符串的%s占位符替换
'''
@db.sql(USER,SQL_NAME='dml',SQL_TEMPLATE_TYPE=1)
def TempType1():
# 返回值类型是tuple,按位置顺序替换sql中的%s
return ("('Chalk Test 5','Chalk Test 6')",)
'''
@db.sql注解:绑定一个sql方法
-- SQL_TEMPLATE_TYPE = 2, 通过参数{0} ~ {n}的形式接收内容
'''
@db.sql(USER,SQL_NAME='dml',SQL_TEMPLATE_TYPE=2)
def TempType2():
# 返回值类型是tuple,以参数的形式传递到sql中
return ("('Chalk Test 5','Chalk Test 6')","USERDESC")
'''
@db.sql注解:绑定一个sql方法
-- SQL_TEMPLATE_TYPE = 3, 字典键值匹配
-- 例如 :USERDESC1 匹配@sql注解方法返回值{"USERDESC1":value1,"USERDESC2":value2}中的value1
'''
@db.sql(USER,SQL_NAME='dml',SQL_TEMPLATE_TYPE=3)
def TempType3():
# 返回值类型是dict,按照dict的key来绑定
return {"USERDESC1":"'Chalk Test 5'","USERDESC2":"'Chalk Test 6'"}
'''
@db.sql注解:绑定一个sql方法
-- SQL_TEMPLATE_TYPE = 4, 通过python字符串的%s占位符替换
'''
@db.sql(USER,SQL_NAME='dml',SQL_TEMPLATE_TYPE=4)
def TempType4():
# 返回值类型是tuple,按位置顺序替换sql中的%s,自动填充引号
# 组成的sql字符串:SELECT ID,NAME,USERDESC FROM USER WHERE USERDESC IN ('Chalk Test 5','Chalk Test 6');
return ("Chalk Test 5","Chalk Test 6")
'''
@db.sql注解:绑定一个sql方法
-- SQL_TEMPLATE_TYPE = 5, 通过参数{0} ~ {n}的形式接收内容
'''
@db.sql(USER,SQL_NAME='dml',SQL_TEMPLATE_TYPE=5)
def TempType5():
# 返回值类型是tuple,以参数的形式传递到sql中,自动填充引号
# 组成的sql字符串:SELECT ID,NAME,USERDESC FROM USER WHERE USERDESC IN ('Chalk Test 5','Chalk Test 6');
return ("Chalk Test 5","Chalk Test 6")
'''
@db.sql注解:绑定一个sql方法
-- SQL_TEMPLATE_TYPE = 6, 字典键值匹配
-- 例如 :USERDESC1 匹配@sql注解方法返回值{"USERDESC1":value1,"USERDESC2":value2}中的value1
'''
@db.sql(USER,SQL_NAME='dml',SQL_TEMPLATE_TYPE=6)
def TempType6():
# 返回值类型是dict,按照dict的key来绑定,自动填充引号
# 组成的sql字符串:SELECT ID,NAME,USERDESC FROM USER WHERE USERDESC IN ('Chalk Test 5','Chalk Test 6');
return {"USERDESC1":"Chalk Test 5","USERDESC2":"Chalk Test 6"}
'''
@db.sql注解:绑定一个sql方法
-- QUOTATION参数改变自动填充的引号类型,默认单引号
'''
@db.sql(USER,SQL_NAME='dml',SQL_TEMPLATE_TYPE=5,QUOTATION="\"")
def quotationType():
# 自动填充双引号
# 组成的sql字符串:SELECT ID,NAME,USERDESC FROM USER WHERE USERDESC IN ("Chalk Test 5","Chalk Test 6");
return ("Chalk Test 5","Chalk Test 6")
'''
@db.sql注解:绑定一个sql方法
-- SQL_TEMPLATE_TYPE如果没有指定,默认值:SQL_TEMPLATE_TYPE=6
'''
@db.sql(USER,SQL_NAME='dml')
def defaultTempType():
# 按照SQL_TEMPLATE_TYPE=6的方式解析
# 组成的sql字符串:SELECT ID,NAME,USERDESC FROM USER WHERE USERDESC IN ('Chalk Test 5','Chalk Test 6');
return {"USERDESC1":"Chalk Test 5","USERDESC2":"Chalk Test 6"}
'''
@db.conn注解:创建一个连接
注意1:@db.sql注解的方法只能在@db.conn中被调用,但@db.sql可以调任何无注解方法
注意2:@db.conn可以调用任何无注解方法,也可以被任何无注解方法调用,但不能调用@db.conn注解的方法
'''
# 通过注解创建一个链接
@db.conn()
def ExecSQL():
tt1 = TempType1()
print("TempType1 --> ",tt1)
tt2 = TempType2()
print("TempType2 --> ",tt2)
tt3 = TempType3()
print("TempType3 --> ",tt3)
tt4 = TempType4()
print("TempType4 --> ",tt4)
tt5 = TempType5()
print("TempType5 --> ",tt5)
tt6 = TempType6()
print("TempType6 --> ",tt6)
qt = quotationType()
print("quotationType --> ",qt)
dt = defaultTempType()
print("defaultTempType --> ",dt)
ExecSQL() | [
"os.path.abspath",
"os.path.join"
] | [((127, 152), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (142, 152), False, 'import os\n'), ((459, 486), 'os.path.join', 'os.path.join', (['case_dir', '"""."""'], {}), "(case_dir, '.')\n", (471, 486), False, 'import os\n')] |
# Generated by Django 2.2.4 on 2019-08-11 17:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('alarms_project', '0002_auto_20190811_1841'),
]
operations = [
migrations.AlterField(
model_name='alarm',
name='time',
field=models.DateTimeField(),
),
]
| [
"django.db.models.DateTimeField"
] | [((340, 362), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (360, 362), False, 'from django.db import migrations, models\n')] |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class HardSwishOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsHardSwishOptions(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = HardSwishOptions()
x.Init(buf, n + offset)
return x
@classmethod
def HardSwishOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# HardSwishOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def HardSwishOptionsStart(builder): builder.StartObject(0)
def HardSwishOptionsEnd(builder): return builder.EndObject()
class HardSwishOptionsT(object):
# HardSwishOptionsT
def __init__(self):
pass
@classmethod
def InitFromBuf(cls, buf, pos):
hardSwishOptions = HardSwishOptions()
hardSwishOptions.Init(buf, pos)
return cls.InitFromObj(hardSwishOptions)
@classmethod
def InitFromObj(cls, hardSwishOptions):
x = HardSwishOptionsT()
x._UnPack(hardSwishOptions)
return x
# HardSwishOptionsT
def _UnPack(self, hardSwishOptions):
if hardSwishOptions is None:
return
# HardSwishOptionsT
def Pack(self, builder):
HardSwishOptionsStart(builder)
hardSwishOptions = HardSwishOptionsEnd(builder)
return hardSwishOptions
| [
"flatbuffers.compat.import_numpy",
"flatbuffers.table.Table",
"flatbuffers.encode.Get",
"flatbuffers.util.BufferHasIdentifier"
] | [((159, 173), 'flatbuffers.compat.import_numpy', 'import_numpy', ([], {}), '()\n', (171, 173), False, 'from flatbuffers.compat import import_numpy\n'), ((315, 378), 'flatbuffers.encode.Get', 'flatbuffers.encode.Get', (['flatbuffers.packer.uoffset', 'buf', 'offset'], {}), '(flatbuffers.packer.uoffset, buf, offset)\n', (337, 378), False, 'import flatbuffers\n'), ((576, 668), 'flatbuffers.util.BufferHasIdentifier', 'flatbuffers.util.BufferHasIdentifier', (['buf', 'offset', "b'TFL3'"], {'size_prefixed': 'size_prefixed'}), "(buf, offset, b'TFL3', size_prefixed=\n size_prefixed)\n", (612, 668), False, 'import flatbuffers\n'), ((750, 783), 'flatbuffers.table.Table', 'flatbuffers.table.Table', (['buf', 'pos'], {}), '(buf, pos)\n', (773, 783), False, 'import flatbuffers\n')] |
"""
module containing functions to use ffprobe to parse video frame info
"""
from __future__ import print_function
import subprocess
import cStringIO
class base_frame(object):
"""
Base Frame from FFProbe
[FRAME]
media_type=video
stream_index=0
key_frame=0
pkt_pts=11745667
pkt_pts_time=130.507411
pkt_dts=11745667
pkt_dts_time=130.507411
best_effort_timestamp=11745667
best_effort_timestamp_time=130.507411
pkt_duration=3003
pkt_duration_time=0.033367
pkt_pos=86509020
pkt_size=13294
...
[/FRAME]
"""
def __init__(self, buf, parser):
"""
Constructs a base ffprobe frame
:param buf: buffer containing frame info
:param parser: ffprobe frame parser
"""
self.stream_index = parser.get_int(buf)
self.key_frame = parser.get_int(buf)
self.pkt_pts = parser.get_int(buf)
self.pkt_pts_time = parser.get_float(buf)
self.pkt_dts = parser.get_int(buf)
self.pkt_dts_time = parser.get_float(buf)
self.best_effort_timestamp = parser.get_int(buf)
self.best_effort_timestamp_time = parser.get_float(buf)
self.pkt_duration = parser.get_int(buf)
self.pkt_duration_time = parser.get_float(buf)
self.pkt_pos = parser.get_int(buf)
self.pkt_size = parser.get_int(buf)
class audio_frame(base_frame):
"""
Audio Frame from FFProbe
[FRAME]
...
sample_fmt=s16p
nb_samples=1152
channels=2
channel_layout=stereo
[/FRAME]
"""
def __init__(self, buf, parser):
"""
Constructs an Audio Frame from FFprobe
:param buf: buffer containing ffprobe frame info
:param parser: ffprobe frame parser
"""
super(audio_frame, self).__init__(buf, parser)
self.media_type = 'audio'
self.sample_fmt = parser.get_str(buf)
self.nb_samples = parser.get_int(buf)
self.channels = parser.get_int(buf)
self.channel_layout = parser.get_str(buf)
class video_frame(base_frame):
"""
Video Frame from FFProbe
[FRAME]
...
width=720
height=480
pix_fmt=yuv420p
sample_aspect_ratio=1:1
pict_type=B
coded_picture_number=3889
display_picture_number=0
interlaced_frame=0
top_field_first=0
repeat_pict=0
[/FRAME]
"""
def __init__(self, buf, parser):
"""
Constructs a Video Frame from ffprobe
:param buf: buffer containing ffprobe frame info
:param parser: ffprobe frame parser
"""
super(video_frame, self).__init__(buf, parser)
self.media_type = 'video'
self.width = parser.get_int(buf)
self.height = parser.get_int(buf)
self.pix_fmt = parser.get_str(buf)
self.sample_aspect_ratio = parser.get_str(buf)
self.pict_type = parser.get_str(buf)
self.coded_picture_number = parser.get_int(buf)
self.display_picture_number = parser.get_int(buf)
self.interlaced_frame = parser.get_int(buf)
self.top_field_first = parser.get_int(buf)
self.repeat_pict = parser.get_int(buf)
class side_data(object):
"""
Side Data from FFProbe
[SIDE_DATA]
side_data_type=GOP timecode
side_data_size=8
timecode=00:00:00:00
[/SIDE_DATA]
"""
def __init__(self, buf, parser):
"""
Constructs side data frame
:param buf: buffer containing ffprobe frame info
:param parser: ffprobe frame parser
"""
self.side_data_type = parser.get_str(buf)
self.side_data_size = parser.get_int(buf)
self.timecode = parser.get_str(buf)
class ffprobe_frame_info_parser(object):
"""
ffprobe frame parser, reads ffprobe entries and extracts key, value pairs
"""
def get_str(self, buf, sep='='):
_, value = buf.readline().split(sep)
return value[:-1]
def get_int(self, buf, sep='='):
_, value = buf.readline().split(sep)
value = value[:-1]
if value == 'N/A':
value = -1
else:
value = int(value)
return value
def get_float(self, buf, sep='='):
_, value = buf.readline().split(sep)
value = value[:-1]
if value == 'N/A':
value = float('nan')
else:
value = float(value)
return value
def get_entry(self, buf, sep='='):
key, value = buf.readline().split(sep)
value = value[:-1]
return key, value
def peek_line(buf):
pos = buf.tell()
line = buf.readline()
buf.seek(pos)
return line
def ffprobe_video(filename):
"""
probes a video using ffprobe subprocess
:param filename: video file to probe
:return: list of audio, video frames
"""
command = ["ffprobe", "-show_frames", filename]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
video_frames = []
audio_frames = []
p = ffprobe_frame_info_parser()
buf = cStringIO.StringIO(out)
while True:
line = buf.readline()
if line == '':
break
else:
info_type = line[:-1]
if info_type == '[FRAME]':
media_type = p.get_str(buf)
if media_type == "video":
frame = video_frame(buf, p)
video_frames.append(frame)
# check if [SIDE_DATA] exists
line = peek_line(buf)[:-1]
if line == '[SIDE_DATA]':
_ = buf.readline() # read the header [SIDE_DATA]
_ = side_data(buf, p)
buf.readline() # read the end tag [/SIDE_DATA]
else:
frame = audio_frame(buf, p)
audio_frames.append(frame)
buf.readline() # read the end tag [/FRAME]
return audio_frames, video_frames
def main():
audio_frames, video_frames = ffprobe_video('s01.mpg')
assert len(video_frames) == 3890
if __name__ == '__main__':
main()
| [
"subprocess.Popen",
"cStringIO.StringIO"
] | [((4861, 4934), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (4877, 4934), False, 'import subprocess\n'), ((5056, 5079), 'cStringIO.StringIO', 'cStringIO.StringIO', (['out'], {}), '(out)\n', (5074, 5079), False, 'import cStringIO\n')] |
import uuid
import pytest
from pydantic import BaseModel
from daemon.models import DaemonID
VALID_JTYPES = ['jflow', 'jdeployment', 'jpod', 'jworkspace', 'jnetwork']
def test_jtype_only():
for jtype in VALID_JTYPES:
d = DaemonID(jtype)
assert d.jtype == jtype
assert uuid.UUID(d.jid)
def test_jtype_jid():
an_id = uuid.uuid4()
for jtype in VALID_JTYPES:
d = DaemonID(f'{jtype}-{an_id}')
assert d.jtype == jtype
assert d.jid == str(an_id)
assert d.tag == f'{jtype}:{an_id}'
def test_id_raise():
an_id = uuid.uuid4()
with pytest.raises(TypeError):
DaemonID(an_id)
with pytest.raises(TypeError):
DaemonID('invalid')
with pytest.raises(TypeError):
DaemonID(f'invalid-{an_id}')
with pytest.raises(TypeError):
DaemonID(f'jflow-invalid')
def test_validate_id_in_pydantic_model():
an_id = uuid.uuid4()
class Model(BaseModel):
id: DaemonID
for jtype in VALID_JTYPES:
Model(id=jtype)
Model(id=f'{jtype}-{an_id}')
with pytest.raises(ValueError):
Model(id=f'invalid-{an_id}')
with pytest.raises(ValueError):
Model(id=an_id)
with pytest.raises(ValueError):
Model(id='a-string')
| [
"uuid.UUID",
"pytest.raises",
"daemon.models.DaemonID",
"uuid.uuid4"
] | [((352, 364), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (362, 364), False, 'import uuid\n'), ((582, 594), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (592, 594), False, 'import uuid\n'), ((918, 930), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (928, 930), False, 'import uuid\n'), ((236, 251), 'daemon.models.DaemonID', 'DaemonID', (['jtype'], {}), '(jtype)\n', (244, 251), False, 'from daemon.models import DaemonID\n'), ((299, 315), 'uuid.UUID', 'uuid.UUID', (['d.jid'], {}), '(d.jid)\n', (308, 315), False, 'import uuid\n'), ((408, 436), 'daemon.models.DaemonID', 'DaemonID', (['f"""{jtype}-{an_id}"""'], {}), "(f'{jtype}-{an_id}')\n", (416, 436), False, 'from daemon.models import DaemonID\n'), ((604, 628), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (617, 628), False, 'import pytest\n'), ((638, 653), 'daemon.models.DaemonID', 'DaemonID', (['an_id'], {}), '(an_id)\n', (646, 653), False, 'from daemon.models import DaemonID\n'), ((664, 688), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (677, 688), False, 'import pytest\n'), ((698, 717), 'daemon.models.DaemonID', 'DaemonID', (['"""invalid"""'], {}), "('invalid')\n", (706, 717), False, 'from daemon.models import DaemonID\n'), ((728, 752), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (741, 752), False, 'import pytest\n'), ((762, 790), 'daemon.models.DaemonID', 'DaemonID', (['f"""invalid-{an_id}"""'], {}), "(f'invalid-{an_id}')\n", (770, 790), False, 'from daemon.models import DaemonID\n'), ((801, 825), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (814, 825), False, 'import pytest\n'), ((835, 861), 'daemon.models.DaemonID', 'DaemonID', (['f"""jflow-invalid"""'], {}), "(f'jflow-invalid')\n", (843, 861), False, 'from daemon.models import DaemonID\n'), ((1084, 1109), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1097, 1109), False, 'import pytest\n'), ((1158, 1183), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1171, 1183), False, 'import pytest\n'), ((1219, 1244), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1232, 1244), False, 'import pytest\n')] |
from django.urls import path, include
from . import views
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register('accounts', views.AccountViewSet)
router.register('parents', views.ParentAccountViewSet)
router.register('contacts', views.ContactViewSet)
router.register('opportunities', views.OpportunityViewSet)
router.register('technology', views.TechnologyViewSet)
urlpatterns = [
path("", include(router.urls))
] | [
"rest_framework.routers.DefaultRouter",
"django.urls.include"
] | [((118, 133), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (131, 133), False, 'from rest_framework.routers import DefaultRouter\n'), ((435, 455), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (442, 455), False, 'from django.urls import path, include\n')] |
import json
from django.contrib.auth.models import User
from django.urls import reverse
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from profiles.api import serializers
from . import models
class RegistrationTestCase(APITestCase):
def test_registration(self):
data = {"username": "testcase", "email":"<EMAIL>",
"password1": "<PASSWORD>", "password2": "<PASSWORD>"}
response = self.client.post("/api/v1/rest-auth/registration/", data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class ProfileViewSetTestCase(APITestCase):
list_url = reverse('profile-list')
def setUp(self):
self.user = User.objects.create_user(username="test1",
password="<PASSWORD>")
self.token = Token.objects.create(user=self.user)
self.api_authentication()
def api_authentication(self):
self.client.credentials(HTTP_AUTHORIZATION="Token " + self.token.key)
def test_profile_list_authenticated(self):
response = self.client.get(self.list_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_profile_list_un_authenitcated(self):
self.client.force_authenticate(user=None)
response = self.client.get(self.list_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_profile_detail_retrieve(self):
response = self.client.get(reverse('profile-detail', kwargs={"pk": 1}))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["user"], "test1")
def test_profile_update_by_owner(self):
response = self.client.put(reverse('profile-detail', kwargs={"pk": 1}),
{"city": "TestCity", "bio": "TestBio"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(json.loads(response.content),
{"id": 1, "user": "test1", "bio": "TestBio",
"city": "TestCity", "avatar":None})
def test_profile_update_by_random_user(self):
random_user = User.objects.create_user(username="random",
password="<PASSWORD>")
self.client.force_authenticate(user=random_user)
response = self.client.put(reverse("profile-detail", kwargs={"pk":1}),
{"bio": "Bio Hacked!"})
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class ProfileStatusViewSetTestCase(APITestCase):
url = reverse("status-list")
def setUp(self):
self.user = User.objects.create_user(username="test1",
password="<PASSWORD>")
self.status = models.ProfileStatus.objects.create(user_profile=self.user.profile,
status_content="status test")
self.token = Token.objects.create(user=self.user)
self.api_authentication()
def api_authentication(self):
self.client.credentials(HTTP_AUTHORIZATION="Token " + self.token.key)
def test_status_list_authenticated(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_status_list_un_authenticated(self):
self.client.force_authenticate(user=None)
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_status_create(self):
data = {"status_content": "a new status!"}
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data["user_profile"], "test1")
self.assertEqual(response.data["status_content"], "a new status!")
def test_single_status_retrieve(self):
serializer_data = serializers.ProfileStatusSerializer(instance=self.status).data
response = self.client.get(reverse('status-detail', kwargs={"pk":1}))
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = json.loads(response.content)
self.assertEqual(serializer_data, response_data)
def test_status_update_owner(self):
data = {"status_content": "content updated"}
response = self.client.put(reverse("status-detail", kwargs={"pk":1}),
data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["status_content"], "content updated")
def test_status_update_random_user(self):
random_user = User.objects.create_user(username="random",
password="<PASSWORD>")
self.client.force_authenticate(user=random_user)
data = {"status_content": "Data hacked!"}
response = self.client.put(reverse("status-detail", kwargs={"pk":1}),
data=data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) | [
"json.loads",
"profiles.api.serializers.ProfileStatusSerializer",
"django.urls.reverse",
"django.contrib.auth.models.User.objects.create_user",
"rest_framework.authtoken.models.Token.objects.create"
] | [((696, 719), 'django.urls.reverse', 'reverse', (['"""profile-list"""'], {}), "('profile-list')\n", (703, 719), False, 'from django.urls import reverse\n'), ((2704, 2726), 'django.urls.reverse', 'reverse', (['"""status-list"""'], {}), "('status-list')\n", (2711, 2726), False, 'from django.urls import reverse\n'), ((762, 827), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""test1"""', 'password': '"""<PASSWORD>"""'}), "(username='test1', password='<PASSWORD>')\n", (786, 827), False, 'from django.contrib.auth.models import User\n'), ((894, 930), 'rest_framework.authtoken.models.Token.objects.create', 'Token.objects.create', ([], {'user': 'self.user'}), '(user=self.user)\n', (914, 930), False, 'from rest_framework.authtoken.models import Token\n'), ((2250, 2316), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""random"""', 'password': '"""<PASSWORD>"""'}), "(username='random', password='<PASSWORD>')\n", (2274, 2316), False, 'from django.contrib.auth.models import User\n'), ((2769, 2834), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""test1"""', 'password': '"""<PASSWORD>"""'}), "(username='test1', password='<PASSWORD>')\n", (2793, 2834), False, 'from django.contrib.auth.models import User\n'), ((3077, 3113), 'rest_framework.authtoken.models.Token.objects.create', 'Token.objects.create', ([], {'user': 'self.user'}), '(user=self.user)\n', (3097, 3113), False, 'from rest_framework.authtoken.models import Token\n'), ((4291, 4319), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (4301, 4319), False, 'import json\n'), ((4808, 4874), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""random"""', 'password': '"""<PASSWORD>"""'}), "(username='random', password='<PASSWORD>')\n", (4832, 4874), False, 'from django.contrib.auth.models import User\n'), ((1552, 1595), 'django.urls.reverse', 'reverse', (['"""profile-detail"""'], {'kwargs': "{'pk': 1}"}), "('profile-detail', kwargs={'pk': 1})\n", (1559, 1595), False, 'from django.urls import reverse\n'), ((1801, 1844), 'django.urls.reverse', 'reverse', (['"""profile-detail"""'], {'kwargs': "{'pk': 1}"}), "('profile-detail', kwargs={'pk': 1})\n", (1808, 1844), False, 'from django.urls import reverse\n'), ((2018, 2046), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (2028, 2046), False, 'import json\n'), ((2457, 2500), 'django.urls.reverse', 'reverse', (['"""profile-detail"""'], {'kwargs': "{'pk': 1}"}), "('profile-detail', kwargs={'pk': 1})\n", (2464, 2500), False, 'from django.urls import reverse\n'), ((4059, 4116), 'profiles.api.serializers.ProfileStatusSerializer', 'serializers.ProfileStatusSerializer', ([], {'instance': 'self.status'}), '(instance=self.status)\n', (4094, 4116), False, 'from profiles.api import serializers\n'), ((4157, 4199), 'django.urls.reverse', 'reverse', (['"""status-detail"""'], {'kwargs': "{'pk': 1}"}), "('status-detail', kwargs={'pk': 1})\n", (4164, 4199), False, 'from django.urls import reverse\n'), ((4506, 4548), 'django.urls.reverse', 'reverse', (['"""status-detail"""'], {'kwargs': "{'pk': 1}"}), "('status-detail', kwargs={'pk': 1})\n", (4513, 4548), False, 'from django.urls import reverse\n'), ((5064, 5106), 'django.urls.reverse', 'reverse', (['"""status-detail"""'], {'kwargs': "{'pk': 1}"}), "('status-detail', kwargs={'pk': 1})\n", (5071, 5106), False, 'from django.urls import reverse\n')] |
#!/usr/bin/env python3
# %%
import logging
import itertools
def part1(data):
orders = [(int(d) for d in line.split('x')) for line in data.splitlines()
if line.strip()]
total = 0
for l, w, h in orders:
area = (2 * l * w) + (2 * w * h) + (2 * h * l)
slack = min(a * b for a, b in itertools.combinations((l, w, h), 2))
total += (area + slack)
return total
###############################################################################
def part2(data):
orders = [(int(d) for d in line.split('x')) for line in data.splitlines()
if line.strip()]
total = 0
for l, w, h in orders:
bow = (l * w * h)
ribbon = min(
(2 * a + 2 * b) for a, b in itertools.combinations((l, w, h), 2))
total += (bow + ribbon)
return total
###############################################################################
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='AoC 2015-02 Solution')
parser.add_argument(
'INPUT',
nargs='?',
default="input.txt",
help='Input file to run the solution with.')
parser.add_argument(
'-v', '--verbose', action="store_true", help='Turn on verbose logging.')
parser.add_argument(
'-l', '--line', action="store_true", help='Parse input line by line.')
parser.add_argument(
'-1',
'--no-part1',
action="store_true",
help='Exclude Part 1 solution from run.')
parser.add_argument(
'-2',
'--no-part2',
action="store_true",
help='Exclude Part 2 solution from run.')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
with open(args.INPUT) as f:
data = f.read()
if not args.line:
if not args.no_part1:
print("Part 1: %r" % part1(data))
if not args.no_part2:
print("Part 2: %r" % part2(data))
else:
if not args.no_part1:
for line in data.splitlines():
print("Part 1: %r = %r" % (line, part1(line)))
if not args.no_part2:
for line in data.splitlines():
print("Part 2: %r = %r" % (line, part2(line)))
| [
"logging.basicConfig",
"itertools.combinations",
"argparse.ArgumentParser"
] | [((985, 1044), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""AoC 2015-02 Solution"""'}), "(description='AoC 2015-02 Solution')\n", (1008, 1044), False, 'import argparse\n'), ((1739, 1779), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (1758, 1779), False, 'import logging\n'), ((323, 359), 'itertools.combinations', 'itertools.combinations', (['(l, w, h)', '(2)'], {}), '((l, w, h), 2)\n', (345, 359), False, 'import itertools\n'), ((752, 788), 'itertools.combinations', 'itertools.combinations', (['(l, w, h)', '(2)'], {}), '((l, w, h), 2)\n', (774, 788), False, 'import itertools\n')] |
from typing import Dict
from lightbus.exceptions import (
UnknownApi,
InvalidApiRegistryEntry,
EventNotFound,
MisconfiguredApiOptions,
InvalidApiEventConfiguration,
)
__all__ = ["Api", "Event"]
class ApiRegistry:
def __init__(self):
self._apis: Dict[str, Api] = dict()
def add(self, api: "Api"):
if isinstance(api, type):
raise InvalidApiRegistryEntry(
"An attempt was made to add a type to the API registry. This "
"is probably because you are trying to add the API class, rather "
"than an instance of the API class.\n"
"\n"
"Use bus.client.register_api(MyApi()), rather than bus.client.register_api(MyApi)"
)
self._apis[api.meta.name] = api
def get(self, name) -> "Api":
try:
return self._apis[name]
except KeyError:
raise UnknownApi(
"An API named '{}' was requested from the registry but the "
"registry does not recognise it. Maybe the incorrect API name "
"was specified, or maybe the API has not been registered.".format(name)
)
def remove(self, name) -> None:
try:
del self._apis[name]
except KeyError:
raise UnknownApi(
"An attempt was made to remove an API named '{}' from the registry, but the API "
"could not be found. Maybe the incorrect API name "
"was specified, or maybe the API has not been registered.".format(name)
)
def public(self):
return [api for api in self._apis.values() if not api.meta.internal]
def internal(self):
return [api for api in self._apis.values() if api.meta.internal]
def all(self):
return list(self._apis.values())
def names(self):
return list(self._apis.keys())
class ApiOptions:
name: str
internal: bool = False
version: int = 1
def __init__(self, options):
for k, v in options.items():
if not k.startswith("_"):
setattr(self, k, v)
class ApiMetaclass(type):
""" API Metaclass
Validates options in the API's Meta class and populates the
API class' `meta` attribute.
"""
def __init__(cls, name, bases=None, dict_=None):
is_api_base_class = name == "Api" and not bases
if is_api_base_class:
super(ApiMetaclass, cls).__init__(name, bases, dict_)
else:
options = dict_.get("Meta", None)
if options is None:
raise MisconfiguredApiOptions(
f"API class {name} does not contain a class named 'Meta'. Each API definition "
f"must contain a child class named 'Meta' which can contain configurations options. "
f"For example, the 'name' option is required and specifies "
f"the name used to access the API on the bus."
)
cls.sanity_check_options(name, options)
cls.meta = ApiOptions(cls.Meta.__dict__.copy())
super(ApiMetaclass, cls).__init__(name, bases, dict_)
if cls.meta.name == "default" or cls.meta.name.startswith("default."):
raise MisconfiguredApiOptions(
f"API class {name} is named 'default', or starts with 'default.'. "
f"This is a reserved name and is not allowed, please change it to something else."
)
def sanity_check_options(cls, name, options):
if not getattr(options, "name", None):
raise MisconfiguredApiOptions(
"API class {} does not specify a name option with its "
"'Meta' options."
"".format(name)
)
class Api(metaclass=ApiMetaclass):
class Meta:
name = None
def get_event(self, name) -> "Event":
event = getattr(self, name, None)
if isinstance(event, Event):
return event
else:
raise EventNotFound("Event named {}.{} could not be found".format(self, name))
def __str__(self):
return self.meta.name
class Event:
def __init__(self, parameters=tuple()):
# Ensure you update the __copy__() method if adding other instance variables below
if isinstance(parameters, str):
raise InvalidApiEventConfiguration(
f"You appear to have passed a string value of {repr(parameters)} "
f"for your API's event's parameters. This should be a list or a tuple, "
f"not a string. You probably missed a comma when defining your "
f"tuple of parameter names."
)
self.parameters = parameters
| [
"lightbus.exceptions.InvalidApiRegistryEntry",
"lightbus.exceptions.MisconfiguredApiOptions"
] | [((390, 671), 'lightbus.exceptions.InvalidApiRegistryEntry', 'InvalidApiRegistryEntry', (['"""An attempt was made to add a type to the API registry. This is probably because you are trying to add the API class, rather than an instance of the API class.\n\nUse bus.client.register_api(MyApi()), rather than bus.client.register_api(MyApi)"""'], {}), '(\n """An attempt was made to add a type to the API registry. This is probably because you are trying to add the API class, rather than an instance of the API class.\n\nUse bus.client.register_api(MyApi()), rather than bus.client.register_api(MyApi)"""\n )\n', (413, 671), False, 'from lightbus.exceptions import UnknownApi, InvalidApiRegistryEntry, EventNotFound, MisconfiguredApiOptions, InvalidApiEventConfiguration\n'), ((2634, 2930), 'lightbus.exceptions.MisconfiguredApiOptions', 'MisconfiguredApiOptions', (['f"""API class {name} does not contain a class named \'Meta\'. Each API definition must contain a child class named \'Meta\' which can contain configurations options. For example, the \'name\' option is required and specifies the name used to access the API on the bus."""'], {}), '(\n f"API class {name} does not contain a class named \'Meta\'. Each API definition must contain a child class named \'Meta\' which can contain configurations options. For example, the \'name\' option is required and specifies the name used to access the API on the bus."\n )\n', (2657, 2930), False, 'from lightbus.exceptions import UnknownApi, InvalidApiRegistryEntry, EventNotFound, MisconfiguredApiOptions, InvalidApiEventConfiguration\n'), ((3315, 3496), 'lightbus.exceptions.MisconfiguredApiOptions', 'MisconfiguredApiOptions', (['f"""API class {name} is named \'default\', or starts with \'default.\'. This is a reserved name and is not allowed, please change it to something else."""'], {}), '(\n f"API class {name} is named \'default\', or starts with \'default.\'. This is a reserved name and is not allowed, please change it to something else."\n )\n', (3338, 3496), False, 'from lightbus.exceptions import UnknownApi, InvalidApiRegistryEntry, EventNotFound, MisconfiguredApiOptions, InvalidApiEventConfiguration\n')] |
from django import forms
from base.models import Developer
class RegistrationForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
password2 = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = Developer
fields = 'username', 'password', '<PASSWORD>'
def __init__(self, *args, **kwargs):
super(RegistrationForm, self).__init__(*args, **kwargs)
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
| [
"django.forms.PasswordInput"
] | [((141, 162), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {}), '()\n', (160, 162), False, 'from django import forms\n'), ((203, 224), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {}), '()\n', (222, 224), False, 'from django import forms\n')] |
import logging
from aioscrapy.utils.reqser import request_to_dict, request_from_dict
from .serializ import PickleCompat
logger = logging.getLogger(__name__)
_to_str = lambda x: x if isinstance(x, str) else str(x)
class Base(object):
"""Per-spider base queue class"""
def __init__(self, server, spider, key, serializer=None):
"""Initialize per-spider redis queue.
Parameters
----------
server : StrictRedis
Redis client instance.
spider : Spider
Scrapy spider instance.
key: str
Redis key where to put and get messages.
serializer : object
Serializer object with ``loads`` and ``dumps`` methods.
"""
if serializer is None:
# Backward compatibility.
# TODO: deprecate pickle.
serializer = PickleCompat
if not hasattr(serializer, 'loads'):
raise TypeError("serializer does not implement 'loads' function: %r"
% serializer)
if not hasattr(serializer, 'dumps'):
raise TypeError("serializer '%s' does not implement 'dumps' function: %r"
% serializer)
self.server = server
self.spider = spider
self.key = key % {'spider': spider.name}
self.serializer = serializer
def _encode_request(self, request):
"""Encode a request object"""
obj = request_to_dict(request, self.spider)
return self.serializer.dumps(obj)
def _decode_request(self, encoded_request):
"""Decode an request previously encoded"""
obj = self.serializer.loads(encoded_request)
return request_from_dict(obj, self.spider)
def __len__(self):
"""Return the length of the queue"""
raise Exception('please use len()')
async def len(self):
raise NotImplementedError
async def push(self, request):
"""Push a request"""
raise NotImplementedError
async def pop(self, timeout=0):
"""Pop a request"""
raise NotImplementedError
async def clear(self):
"""Clear queue/stack"""
await self.server.delete(self.key)
class FifoQueue(Base):
"""Per-spider FIFO queue"""
async def len(self):
return await self.server.llen(self.key)
async def push(self, request):
"""Push a request"""
await self.server.lpush(self.key, self._encode_request(request))
async def pop(self, timeout=0):
"""Pop a request"""
if timeout > 0:
data = await self.server.brpop(self.key, timeout)
if isinstance(data, tuple):
data = data[1]
else:
data = await self.server.rpop(self.key)
if data:
return self._decode_request(data)
class PriorityQueue(Base):
"""Per-spider priority queue abstraction using redis' sorted set"""
async def len(self):
return await self.server.zcard(self.key)
async def push(self, request):
"""Push a request"""
data = self._encode_request(request)
score = request.priority
# We don't use zadd method as the order of arguments change depending on
# whether the class is Redis or StrictRedis, and the option of using
# kwargs only accepts strings, not bytes.
await self.server.zadd(self.key, {data: score})
async def pop(self, timeout=0):
"""
Pop a request
timeout not support in this queue class
"""
# use atomic range/remove using multi/exec
async with self.server.pipeline(transaction=True) as pipe:
results, count = await (
pipe.zrange(self.key, 0, 0)
.zremrangebyrank(self.key, 0, 0)
.execute()
)
if results:
return self._decode_request(results[0])
class LifoQueue(Base):
"""Per-spider LIFO queue."""
async def len(self):
return await self.server.llen(self.key)
async def push(self, request):
"""Push a request"""
await self.server.lpush(self.key, self._encode_request(request))
async def pop(self, timeout=0):
"""Pop a request"""
if timeout > 0:
data = await self.server.blpop(self.key, timeout)
if isinstance(data, tuple):
data = data[1]
else:
data = await self.server.lpop(self.key)
if data:
return self._decode_request(data)
# TODO: Deprecate the use of these names.
SpiderQueue = FifoQueue
SpiderStack = LifoQueue
SpiderPriorityQueue = PriorityQueue
| [
"logging.getLogger",
"aioscrapy.utils.reqser.request_to_dict",
"aioscrapy.utils.reqser.request_from_dict"
] | [((138, 165), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (155, 165), False, 'import logging\n'), ((1499, 1536), 'aioscrapy.utils.reqser.request_to_dict', 'request_to_dict', (['request', 'self.spider'], {}), '(request, self.spider)\n', (1514, 1536), False, 'from aioscrapy.utils.reqser import request_to_dict, request_from_dict\n'), ((1753, 1788), 'aioscrapy.utils.reqser.request_from_dict', 'request_from_dict', (['obj', 'self.spider'], {}), '(obj, self.spider)\n', (1770, 1788), False, 'from aioscrapy.utils.reqser import request_to_dict, request_from_dict\n')] |
#!/usr/bin/python
import serial
import sys
import time
def main():
while True:
ser = serial.Serial('/dev/ttyACM0', 9600, timeout=5000)
last_time = time.time()
while True:
tag = ser.readline().strip()
new_time = time.time()
print('%s %s'%(tag, new_time - last_time))
last_time = new_time
if __name__ == '__main__':
main()
| [
"serial.Serial",
"time.time"
] | [((99, 148), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyACM0"""', '(9600)'], {'timeout': '(5000)'}), "('/dev/ttyACM0', 9600, timeout=5000)\n", (112, 148), False, 'import serial\n'), ((169, 180), 'time.time', 'time.time', ([], {}), '()\n', (178, 180), False, 'import time\n'), ((265, 276), 'time.time', 'time.time', ([], {}), '()\n', (274, 276), False, 'import time\n')] |
#!/usr/bin/env python3
#
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for output archive plugin."""
import copy
import glob
import logging
import os
import resource
import shutil
import tarfile
import tempfile
import time
import unittest
import psutil
from cros.factory.instalog import datatypes
from cros.factory.instalog import log_utils
from cros.factory.instalog import plugin_sandbox
from cros.factory.instalog import testing
class TestOutputArchive(unittest.TestCase):
def setUp(self):
self.core = testing.MockCore()
self.stream = self.core.GetStream(0)
self.tmp_dir = tempfile.mkdtemp(prefix='output_archive_unittest_')
self.event = datatypes.Event({'plugin': 'archive'})
def tearDown(self):
self.core.Close()
shutil.rmtree(self.tmp_dir)
def _GetMemoryUsage(self):
"""Returns current process's memory usage in bytes."""
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 1024
def testMemoryUsage(self):
big_event = datatypes.Event({'1mb': 'x' * 1024 * 1024})
event_size = len(big_event.Serialize())
config = {
'interval': 1000, # arbitrary long time
'threshold_size': 1024 * 1024 * 1024, # arbitrary large value
}
sandbox = plugin_sandbox.PluginSandbox(
'output_archive', config=config,
data_dir=self.tmp_dir, core_api=self.core)
sandbox.Start(True)
mem_usage_start = self._GetMemoryUsage()
logging.info('Initial memory usage: %d', mem_usage_start)
# additional_memory = big_event(1mb) * 10 events * 20 iterations = ~200mb
# maximum_memory = (original_memory + additional_memory) plus 10% padding
mem_usage_max = (mem_usage_start + (event_size * 10 * 20)) * 1.1
for unused_i in range(20):
events = [copy.deepcopy(big_event) for unused_j in range(10)]
self.stream.Queue(events)
sandbox.Flush(1, False) # trigger archive creation
while not self.stream.Empty():
mem_usage = self._GetMemoryUsage()
logging.info('Current memory usage: %d/%d', mem_usage, mem_usage_max)
if mem_usage >= mem_usage_max:
# The test has failed, but we need to interrupt the archive plugin
# and get it to stop as quickly as possible.
# Stop new events from being accessed.
del self.core.streams[0]
# Force any open file handles shut so the plugin stops writing
# to the archive on disk.
proc = psutil.Process()
for f in proc.get_open_files():
os.close(f.fd)
# Manually set the plugin state to STOPPING and advance into this
# state.
# pylint: disable=protected-access
sandbox._state = plugin_sandbox.STOPPING
sandbox.AdvanceState(True)
# Once the plugin has really stopped, report our error.
self.fail('Memory usage exceeded: %d/%d' % (mem_usage, mem_usage_max))
time.sleep(0.1)
# pylint: disable=protected-access
sandbox._state = plugin_sandbox.STOPPING
sandbox.AdvanceState(True)
def testOneEvent(self):
config = {
'interval': 1}
sandbox = plugin_sandbox.PluginSandbox(
'output_archive', config=config,
data_dir=self.tmp_dir, core_api=self.core)
sandbox.Start(True)
# pylint: disable=protected-access
plugin = sandbox._plugin
self.stream.Queue([self.event])
plugin.PrepareAndProcess()
sandbox.Flush(2, True)
sandbox.Stop()
# Inspect the disk archive.
archive_path = glob.glob(os.path.join(self.tmp_dir, 'InstalogEvents*'))[0]
with tarfile.open(archive_path, 'r:gz') as tar:
events_member = [n for n in tar.getnames() if 'events.json' in n][0]
events_file = tar.extractfile(events_member)
lines = events_file.readlines()
self.assertEqual(1, len(lines))
event = datatypes.Event.Deserialize(lines[0])
self.assertEqual(event, self.event)
if __name__ == '__main__':
log_utils.InitLogging(log_utils.GetStreamHandler(logging.INFO))
unittest.main()
| [
"tarfile.open",
"copy.deepcopy",
"cros.factory.instalog.datatypes.Event",
"resource.getrusage",
"os.close",
"psutil.Process",
"os.path.join",
"time.sleep",
"cros.factory.instalog.plugin_sandbox.PluginSandbox",
"cros.factory.instalog.log_utils.GetStreamHandler",
"tempfile.mkdtemp",
"cros.factor... | [((4088, 4103), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4101, 4103), False, 'import unittest\n'), ((650, 668), 'cros.factory.instalog.testing.MockCore', 'testing.MockCore', ([], {}), '()\n', (666, 668), False, 'from cros.factory.instalog import testing\n'), ((729, 780), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""output_archive_unittest_"""'}), "(prefix='output_archive_unittest_')\n", (745, 780), False, 'import tempfile\n'), ((798, 836), 'cros.factory.instalog.datatypes.Event', 'datatypes.Event', (["{'plugin': 'archive'}"], {}), "({'plugin': 'archive'})\n", (813, 836), False, 'from cros.factory.instalog import datatypes\n'), ((886, 913), 'shutil.rmtree', 'shutil.rmtree', (['self.tmp_dir'], {}), '(self.tmp_dir)\n', (899, 913), False, 'import shutil\n'), ((1118, 1161), 'cros.factory.instalog.datatypes.Event', 'datatypes.Event', (["{'1mb': 'x' * 1024 * 1024}"], {}), "({'1mb': 'x' * 1024 * 1024})\n", (1133, 1161), False, 'from cros.factory.instalog import datatypes\n'), ((1361, 1470), 'cros.factory.instalog.plugin_sandbox.PluginSandbox', 'plugin_sandbox.PluginSandbox', (['"""output_archive"""'], {'config': 'config', 'data_dir': 'self.tmp_dir', 'core_api': 'self.core'}), "('output_archive', config=config, data_dir=self\n .tmp_dir, core_api=self.core)\n", (1389, 1470), False, 'from cros.factory.instalog import plugin_sandbox\n'), ((1557, 1614), 'logging.info', 'logging.info', (['"""Initial memory usage: %d"""', 'mem_usage_start'], {}), "('Initial memory usage: %d', mem_usage_start)\n", (1569, 1614), False, 'import logging\n'), ((3204, 3313), 'cros.factory.instalog.plugin_sandbox.PluginSandbox', 'plugin_sandbox.PluginSandbox', (['"""output_archive"""'], {'config': 'config', 'data_dir': 'self.tmp_dir', 'core_api': 'self.core'}), "('output_archive', config=config, data_dir=self\n .tmp_dir, core_api=self.core)\n", (3232, 3313), False, 'from cros.factory.instalog import plugin_sandbox\n'), ((4044, 4084), 'cros.factory.instalog.log_utils.GetStreamHandler', 'log_utils.GetStreamHandler', (['logging.INFO'], {}), '(logging.INFO)\n', (4070, 4084), False, 'from cros.factory.instalog import log_utils\n'), ((2110, 2179), 'logging.info', 'logging.info', (['"""Current memory usage: %d/%d"""', 'mem_usage', 'mem_usage_max'], {}), "('Current memory usage: %d/%d', mem_usage, mem_usage_max)\n", (2122, 2179), False, 'import logging\n'), ((2994, 3009), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3004, 3009), False, 'import time\n'), ((3652, 3686), 'tarfile.open', 'tarfile.open', (['archive_path', '"""r:gz"""'], {}), "(archive_path, 'r:gz')\n", (3664, 3686), False, 'import tarfile\n'), ((3911, 3948), 'cros.factory.instalog.datatypes.Event.Deserialize', 'datatypes.Event.Deserialize', (['lines[0]'], {}), '(lines[0])\n', (3938, 3948), False, 'from cros.factory.instalog import datatypes\n'), ((1014, 1054), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (1032, 1054), False, 'import resource\n'), ((1887, 1911), 'copy.deepcopy', 'copy.deepcopy', (['big_event'], {}), '(big_event)\n', (1900, 1911), False, 'import copy\n'), ((2545, 2561), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (2559, 2561), False, 'import psutil\n'), ((3593, 3638), 'os.path.join', 'os.path.join', (['self.tmp_dir', '"""InstalogEvents*"""'], {}), "(self.tmp_dir, 'InstalogEvents*')\n", (3605, 3638), False, 'import os\n'), ((2612, 2626), 'os.close', 'os.close', (['f.fd'], {}), '(f.fd)\n', (2620, 2626), False, 'import os\n')] |
import json
import datetime as dt
import platform
from ..pydata_lib import test_glob # example of relative import
def print_machine_stats():
try:
from ..pydata_lib import test_glob
except RuntimeError:
print('relative import failed')
print(dt.datetime.now())
print(platform.version())
print(platform.uname())
print(platform.machine())
print(platform.system())
print(platform.processor())
print(platform.system())
| [
"platform.version",
"platform.uname",
"datetime.datetime.now",
"platform.system",
"platform.processor",
"platform.machine"
] | [((271, 288), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (286, 288), True, 'import datetime as dt\n'), ((300, 318), 'platform.version', 'platform.version', ([], {}), '()\n', (316, 318), False, 'import platform\n'), ((330, 346), 'platform.uname', 'platform.uname', ([], {}), '()\n', (344, 346), False, 'import platform\n'), ((358, 376), 'platform.machine', 'platform.machine', ([], {}), '()\n', (374, 376), False, 'import platform\n'), ((388, 405), 'platform.system', 'platform.system', ([], {}), '()\n', (403, 405), False, 'import platform\n'), ((417, 437), 'platform.processor', 'platform.processor', ([], {}), '()\n', (435, 437), False, 'import platform\n'), ((449, 466), 'platform.system', 'platform.system', ([], {}), '()\n', (464, 466), False, 'import platform\n')] |
import itertools
from unittest.mock import patch
import pytest
from pandas_addons.register import DEFAULT_PANDAS_OBJECTS, accessors, register
class TestRegister:
def test_should_return_input_function(self):
def accessor():
pass
assert accessor == register()(accessor)
@patch.dict(accessors, {}, clear=True)
def test_should_use_default_value_when_no_args(self):
def accessor():
pass
register()(accessor)
assert accessors == {"accessor": {pdo: accessor for pdo in DEFAULT_PANDAS_OBJECTS}}
@patch.dict(accessors, {}, clear=True)
def test_should_register_when_register_is_called_on_decorated(self):
def accessor():
pass
register(accessor)
assert accessors == {"accessor": {pdo: accessor for pdo in DEFAULT_PANDAS_OBJECTS}}
@patch.dict(accessors, {}, clear=True)
@pytest.mark.parametrize(
"pandas_objects",
itertools.chain.from_iterable(
itertools.combinations(DEFAULT_PANDAS_OBJECTS, i + 1)
for i in range(len(DEFAULT_PANDAS_OBJECTS))
),
)
def test_should_register_given_pandas_object(self, pandas_objects):
def accessor():
pass
register(*pandas_objects)(accessor)
assert accessors == {"accessor": {pdo: accessor for pdo in pandas_objects}}
| [
"itertools.combinations",
"pandas_addons.register.register",
"unittest.mock.patch.dict"
] | [((311, 348), 'unittest.mock.patch.dict', 'patch.dict', (['accessors', '{}'], {'clear': '(True)'}), '(accessors, {}, clear=True)\n', (321, 348), False, 'from unittest.mock import patch\n'), ((577, 614), 'unittest.mock.patch.dict', 'patch.dict', (['accessors', '{}'], {'clear': '(True)'}), '(accessors, {}, clear=True)\n', (587, 614), False, 'from unittest.mock import patch\n'), ((856, 893), 'unittest.mock.patch.dict', 'patch.dict', (['accessors', '{}'], {'clear': '(True)'}), '(accessors, {}, clear=True)\n', (866, 893), False, 'from unittest.mock import patch\n'), ((738, 756), 'pandas_addons.register.register', 'register', (['accessor'], {}), '(accessor)\n', (746, 756), False, 'from pandas_addons.register import DEFAULT_PANDAS_OBJECTS, accessors, register\n'), ((457, 467), 'pandas_addons.register.register', 'register', ([], {}), '()\n', (465, 467), False, 'from pandas_addons.register import DEFAULT_PANDAS_OBJECTS, accessors, register\n'), ((1250, 1275), 'pandas_addons.register.register', 'register', (['*pandas_objects'], {}), '(*pandas_objects)\n', (1258, 1275), False, 'from pandas_addons.register import DEFAULT_PANDAS_OBJECTS, accessors, register\n'), ((284, 294), 'pandas_addons.register.register', 'register', ([], {}), '()\n', (292, 294), False, 'from pandas_addons.register import DEFAULT_PANDAS_OBJECTS, accessors, register\n'), ((1001, 1054), 'itertools.combinations', 'itertools.combinations', (['DEFAULT_PANDAS_OBJECTS', '(i + 1)'], {}), '(DEFAULT_PANDAS_OBJECTS, i + 1)\n', (1023, 1054), False, 'import itertools\n')] |
import os
DATA_URL_ROOT = 'http://www.earthsystemmodeling.org/download/data'
# If fname doesn't exist, retrieve it from the remote server via http.
def cache_data_file(fname, DATA_URL_ROOT=DATA_URL_ROOT):
import sys
if sys.version_info[0] >= 3:
from urllib.request import urlopen, URLError
else:
from urllib2 import urlopen, URLError
from shutil import copyfileobj
status_ok = True
if not os.path.exists(fname):
url = os.path.join(DATA_URL_ROOT, os.path.basename(fname))
print('Retrieving ' + url + '...\n')
try:
req = urlopen(url)
except URLError:
print('Error opening %s' % url)
status_ok = False
else:
try:
with open(fname, 'wb') as fp:
copyfileobj(req, fp)
except:
status_ok = False
return status_ok
def cache_data_files():
# Filenames to download.
datafilelist = ["aggregAtlanticESTOFS.nc",
"GRIDSPEC_ACCESS1.nc",
"ll1deg_grid.nc",
"ll2.5deg_grid.nc",
"mpas_uniform_10242_dual_counterclockwise.nc",
"so_Omon_GISS-E2.nc",
"T42_grid.nc",
]
# Create data subdirectory if it doesn't exist.
datadir = os.path.join('examples', 'data')
if not os.path.exists(datadir):
os.mkdir(datadir)
# Download each test file.
for fname in datafilelist:
# Retrieve the data files needed for the test cases from the remote server.
status_ok = cache_data_file(os.path.join(datadir, fname))
if not status_ok:
raise IOError("Error downloading '{}'".format(fname))
| [
"os.path.exists",
"urllib2.urlopen",
"shutil.copyfileobj",
"os.path.join",
"os.path.basename",
"os.mkdir"
] | [((1360, 1392), 'os.path.join', 'os.path.join', (['"""examples"""', '"""data"""'], {}), "('examples', 'data')\n", (1372, 1392), False, 'import os\n'), ((433, 454), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (447, 454), False, 'import os\n'), ((1404, 1427), 'os.path.exists', 'os.path.exists', (['datadir'], {}), '(datadir)\n', (1418, 1427), False, 'import os\n'), ((1437, 1454), 'os.mkdir', 'os.mkdir', (['datadir'], {}), '(datadir)\n', (1445, 1454), False, 'import os\n'), ((498, 521), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (514, 521), False, 'import os\n'), ((599, 611), 'urllib2.urlopen', 'urlopen', (['url'], {}), '(url)\n', (606, 611), False, 'from urllib2 import urlopen, URLError\n'), ((1638, 1666), 'os.path.join', 'os.path.join', (['datadir', 'fname'], {}), '(datadir, fname)\n', (1650, 1666), False, 'import os\n'), ((808, 828), 'shutil.copyfileobj', 'copyfileobj', (['req', 'fp'], {}), '(req, fp)\n', (819, 828), False, 'from shutil import copyfileobj\n')] |
import pyEX as p
from datetime import timedelta
from functools import lru_cache
from .utils import last_month, last_close
from .fetch import fetch, \
fetchStats as backfillStats, \
fetchPeers as backfillPeers, \
fetchNews as backfillNews, \
fetchFinancials as backfillFinancials, \
fetchEarnings as backfillEarnings, \
fetchDividends as backfillDividends, \
fetchCompany as backfillCompany
def whichBackfill(field):
if field == 'DAILY':
return backfillDaily
elif field == 'TICK':
return backfillMinute
elif field == 'STATS':
return backfillStats
elif field == 'PEERS':
return backfillPeers
elif field == 'NEWS':
return backfillNews
elif field == 'FINANCIALS':
return backfillFinancials
elif field == 'EARNINGS':
return backfillEarnings
elif field == 'DIVIDENDS':
return backfillDividends
elif field == 'COMPANY':
return backfillCompany
else:
raise NotImplemented
def backfillDaily(distributor, symbols, timeframe='5y', **kwargs):
if len(symbols) > 0:
return fetch(distributor, p.chartDF, {'timeframe': timeframe}, symbols)
return []
@lru_cache(None)
def _getRange(_from):
dates = []
while _from < last_close():
dates.append(_from)
_from += timedelta(days=1)
return dates
def backfillMinute(distributor, symbols, _from=last_month(), **kwargs):
dates = _getRange(_from)
if len(symbols) > 0:
if len(dates) > len(symbols):
# make dates the iterable
for symbol in symbols:
for date, data in distributor.distribute(p.chartDF, {}, [(symbol, None, date) for date in dates], starmap=True):
yield symbol, data
else:
# make symbols the iterable
for date in dates:
for symbol, data in distributor.distribute(p.chartDF, {'date': date, 'timeframe': None}, symbols):
yield symbol, data
| [
"functools.lru_cache",
"datetime.timedelta"
] | [((1309, 1324), 'functools.lru_cache', 'lru_cache', (['None'], {}), '(None)\n', (1318, 1324), False, 'from functools import lru_cache\n'), ((1439, 1456), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1448, 1456), False, 'from datetime import timedelta\n')] |
from __future__ import absolute_import, unicode_literals
import sys
import functools
import json
import click
from polyswarm.formatters import base
def is_grouped(fn):
@functools.wraps(fn)
def wrapper(self, text):
return self._depth*'\t'+fn(self, text)
return wrapper
class TextOutput(base.BaseOutput):
name = 'text'
def __init__(self, color=True, output=sys.stdout, **kwargs):
super(TextOutput, self).__init__(output)
self.color = color
self._depth = 0
self.color = color
def _get_score_format(self, score):
if score < 0.15:
return self._white
elif score < 0.4:
return self._yellow
else:
return self._red
def _output(self, output, write):
if write:
click.echo('\n'.join(output) + '\n', file=self.out)
else:
return output
def artifact(self, artifact, write=True):
output = []
output.append(self._blue('SHA256: {hash}'.format(hash=artifact.sha256)))
output.append(self._white('SHA1: {hash}'.format(hash=artifact.sha1)))
output.append(self._white('MD5: {hash}'.format(hash=artifact.md5)))
output.append(self._white('File type: mimetype: {mimetype}, extended_info: {extended_type}'.
format(mimetype=artifact.mimetype, extended_type=artifact.extended_type)))
h = artifact.metadata.hash
if 'ssdeep' in h:
output.append(self._white('SSDEEP: {}'.format(h['ssdeep'])))
if 'tlsh' in h:
output.append(self._white('TLSH: {}'.format(h['tlsh'])))
if 'authentihash' in h:
output.append(self._white('Authentihash: {}'.format(h['authentihash'])))
p = artifact.metadata.pefile
if 'imphash' in p:
output.append(self._white('Imphash: {}'.format(p['imphash'])))
output.append(self._white('First seen: {}'.format(artifact.first_seen)))
output.append(self._white('Last scanned: {}'.format(artifact.last_scanned)))
# Deprecated
output.append(self._white('Last seen: {}'.format(artifact.last_scanned)))
return self._output(output, write)
def artifact_instance(self, instance, write=True, timeout=False):
output = []
output.append(self._white('============================= Artifact Instance ============================='))
output.append(self._white('Scan permalink: {}'.format(instance.permalink)))
if instance.community == 'stream':
output.append(self._white('Detections: This artifact has not been scanned. You can trigger a scan now.'))
elif len(instance.valid_assertions) == 0 and instance.window_closed and not instance.failed:
output.append(self._white('Detections: No engines responded to this scan. You can trigger a rescan now.'))
elif len(instance.valid_assertions) > 0 and instance.window_closed and not instance.failed:
malicious = 'Detections: {}/{} engines reported malicious'\
.format(len(instance.malicious_assertions), len(instance.valid_assertions))
if len(instance.malicious_assertions) > 0:
output.append(self._red(malicious))
else:
output.append(self._white(malicious))
elif not instance.window_closed and not instance.failed:
output.append(self._white('Detections: This scan has not finished running yet.'))
else:
output.append(self._white('Detections: This scan has failed. Please try again.'))
self._open_group()
for assertion in instance.assertions:
if assertion.verdict is False:
output.append('%s: %s' % (self._green(assertion.engine_name), 'Clean'))
elif assertion.verdict is None or assertion.mask is False:
output.append('%s: %s' % (self._blue(assertion.engine_name), 'Engine chose not respond to this bounty.'))
else:
value = 'Malicious'
if assertion.metadata:
value += ', metadata: %s' % json.dumps(assertion.metadata, sort_keys=True)
output.append('%s: %s' % (self._red(assertion.engine_name), value))
self._close_group()
output.append(self._blue('Scan id: {}'.format(instance.id)))
output.extend(self.artifact(instance, write=False))
if instance.failed:
output.append(self._red('Status: Failed'))
elif instance.window_closed:
output.append(self._white('Status: Assertion window closed'))
elif instance.community == 'stream':
output.append(self._white('Status: This artifact has not been scanned. You can trigger a scan now.'))
elif timeout:
output.append(self._yellow('Status: Lookup timed-out, please retry'))
else:
output.append(self._white('Status: Running'))
if instance.type == 'URL':
output.append(self._white('URL: {}'.format(instance.filename)))
else:
output.append(self._white('Filename: {}'.format(instance.filename)))
output.append(self._white('Community: {}'.format(instance.community)))
output.append(self._white('Country: {}'.format(instance.country)))
if instance.polyscore is not None:
formatter = self._get_score_format(instance.polyscore)
output.append(formatter('PolyScore: {:.20f}'.format(instance.polyscore)))
return self._output(output, write)
def hunt(self, result, write=True):
output = []
output.append(self._blue('Hunt Id: {}'.format(result.id)))
if result.active is not None:
output.append(self._white('Active: {}'.format(result.active)))
if result.ruleset_name is not None:
output.append(self._white('Ruleset Name: {}'.format(result.ruleset_name)))
output.append(self._white('Created at: {}'.format(result.created)))
return self._output(output, write)
def hunt_deletion(self, result, write=True):
output = []
output.append(self._yellow('Successfully deleted Hunt:'))
output.extend(self.hunt(result, write=False))
return self._output(output, write)
def hunt_result(self, result, write=True):
output = []
output.append(self._white('Match on rule {name}'.format(name=result.rule_name) +
(', tags: {result_tags}'.format(
result_tags=result.tags) if result.tags != '' else '')))
output.extend(self.artifact_instance(result.artifact, write=False))
return self._output(output, write)
def ruleset(self, result, write=True, contents=False):
output = []
output.append(self._blue('Ruleset Id: {}'.format(result.id)))
output.append(self._white('Name: {}'.format(result.name)))
output.append(self._white('Description: {}'.format(result.description)))
output.append(self._white('Created at: {}'.format(result.created)))
output.append(self._white('Modified at: {}'.format(result.modified)))
if contents:
output.append(self._white('Contents:\n{}'.format(result.yara)))
return self._output(output, write)
def tag_link(self, result, write=True):
output = []
output.append(self._blue('SHA256: {}'.format(result.sha256)))
output.append(self._white('First seen: {}'.format(result.first_seen)))
output.append(self._white('Tags: {}'.format(result.tags)))
output.append(self._white('Families: {}'.format(result.families)))
output.append(self._white('Emerging: {}'.format(result.emerging)))
return self._output(output, write)
def family(self, result, write=True):
output = []
output.append(self._blue('Family: {}'.format(result.name)))
output.append(self._white('Emerging: {}'.format(result.emerging)))
return self._output(output, write)
def tag(self, result, write=True):
output = []
output.append(self._blue('Tag: {}'.format(result.name)))
return self._output(output, write)
def local_artifact(self, artifact, write=True):
output = []
output.append(self._white('Successfully downloaded artifact {} to {}'
.format(artifact.artifact_name, artifact.name)))
return self._output(output, write)
def _dfs_mapping_render(self, output, path, tree, depth=0):
tree_string = (' | ' * (depth - 1)) + ' +-' if depth > 0 else ''
current_path = '.'.join(path)
if not tree:
output.append(self._white(tree_string + current_path))
else:
if path:
output.append(self._white(tree_string + current_path))
for k, v in tree.items():
self._dfs_mapping_render(output, path + [k], v, depth=depth + 1)
def mapping(self, mapping, write=True):
output = []
output.append(self._white('============================= Mapping ============================='))
self._dfs_mapping_render(output, [], mapping.json)
return self._output(output, write)
def metadata(self, instance, write=True):
output = []
output.append(self._white('============================= Metadata ============================='))
output.append(self._blue('Artifact id: {}'.format(instance.id)))
output.append(self._white('Created: {}'.format(instance.created)))
if instance.sha256:
output.append(self._white('SHA256: {}'.format(instance.sha256)))
if instance.sha1:
output.append(self._white('SHA1: {}'.format(instance.sha1)))
if instance.md5:
output.append(self._white('MD5: {}'.format(instance.md5)))
if instance.ssdeep:
output.append(self._white('SSDEEP: {}'.format(instance.ssdeep)))
if instance.tlsh:
output.append(self._white('TLSH: {}'.format(instance.tlsh)))
if instance.first_seen:
output.append(self._white('First seen: {}'.format(instance.first_seen)))
if instance.last_scanned:
output.append(self._white('Last scanned: {}'.format(instance.last_scanned)))
# Deprecated
output.append(self._white('Last seen: {}'.format(instance.last_scanned)))
if instance.mimetype:
output.append(self._white('Mimetype: {}'.format(instance.mimetype)))
if instance.extended_mimetype:
output.append(self._white('Extended mimetype: {}'.format(instance.extended_mimetype)))
if instance.malicious:
output.append(self._white('Malicious: {}'.format(instance.malicious)))
if instance.benign:
output.append(self._white('Benign: {}'.format(instance.benign)))
if instance.total_detections:
output.append(self._white('Total detections: {}'.format(instance.total_detections)))
if instance.domains:
output.append(self._white('Domains:'))
self._open_group()
output.append(self._white('{}'.format(', '.join(instance.domains))))
self._close_group()
if instance.ipv4:
output.append(self._white('Ipv4:'))
self._open_group()
output.append(self._white('{}'.format(', '.join(instance.ipv4))))
self._close_group()
if instance.ipv6:
output.append(self._white('Ipv6:'))
self._open_group()
output.append(self._white('{}'.format(', '.join(instance.ipv6))))
self._close_group()
if instance.urls:
output.append(self._white('Urls:'))
self._open_group()
output.append(self._white('{}'.format(', '.join(instance.urls))))
self._close_group()
if instance.filenames:
output.append(self._white('Filenames:'))
self._open_group()
output.append(self._white('{}'.format(', '.join(instance.filenames))))
self._close_group()
return self._output(output, write)
def assertions(self, instance, write=True):
output = []
output.append(self._white('============================= Assertions Job ============================='))
output.append(self._blue('Assertions Job id: {}'.format(instance.id)))
output.append(self._white('Engine id: {}'.format(instance.engine_id)))
output.append(self._white('Created at: {}'.format(instance.created)))
output.append(self._white('Start date: {}'.format(instance.date_start)))
output.append(self._white('End date: {}'.format(instance.date_end)))
if instance.storage_path is not None:
output.append(self._white('Download: {}'.format(instance.storage_path)))
output.append(self._white('True Positive: {}'.format(instance.true_positive)))
output.append(self._white('True Negative: {}'.format(instance.true_negative)))
output.append(self._white('False Positive: {}'.format(instance.false_positive)))
output.append(self._white('False Negative: {}'.format(instance.false_negative)))
output.append(self._white('Suspicious: {}'.format(instance.suspicious)))
output.append(self._white('Unknown: {}'.format(instance.unknown)))
output.append(self._white('Total: {}'.format(instance.total)))
return self._output(output, write)
def votes(self, instance, write=True):
output = []
output.append(self._white('============================= Votes Job ============================='))
output.append(self._blue('Votes Job id: {}'.format(instance.id)))
output.append(self._white('Engine id: {}'.format(instance.engine_id)))
output.append(self._white('Created at: {}'.format(instance.created)))
output.append(self._white('Start date: {}'.format(instance.date_start)))
output.append(self._white('End date: {}'.format(instance.date_end)))
if instance.storage_path is not None:
output.append(self._white('Download: {}'.format(instance.storage_path)))
output.append(self._white('True Positive: {}'.format(instance.true_positive)))
output.append(self._white('True Negative: {}'.format(instance.true_negative)))
output.append(self._white('False Positive: {}'.format(instance.false_positive)))
output.append(self._white('False Negative: {}'.format(instance.false_negative)))
output.append(self._white('Suspicious: {}'.format(instance.suspicious)))
output.append(self._white('Unknown: {}'.format(instance.unknown)))
output.append(self._white('Total: {}'.format(instance.total)))
return self._output(output, write)
@is_grouped
def _white(self, text):
return click.style(text, fg='white')
@is_grouped
def _yellow(self, text):
return click.style(text, fg='yellow')
@is_grouped
def _red(self, text):
return click.style(text, fg='red')
@is_grouped
def _blue(self, text):
return click.style(text, fg='blue')
@is_grouped
def _green(self, text):
return click.style(text, fg='green')
def _open_group(self):
self._depth += 1
def _close_group(self):
self._depth -= 1
| [
"json.dumps",
"click.style",
"functools.wraps"
] | [((177, 196), 'functools.wraps', 'functools.wraps', (['fn'], {}), '(fn)\n', (192, 196), False, 'import functools\n'), ((14924, 14953), 'click.style', 'click.style', (['text'], {'fg': '"""white"""'}), "(text, fg='white')\n", (14935, 14953), False, 'import click\n'), ((15015, 15045), 'click.style', 'click.style', (['text'], {'fg': '"""yellow"""'}), "(text, fg='yellow')\n", (15026, 15045), False, 'import click\n'), ((15104, 15131), 'click.style', 'click.style', (['text'], {'fg': '"""red"""'}), "(text, fg='red')\n", (15115, 15131), False, 'import click\n'), ((15191, 15219), 'click.style', 'click.style', (['text'], {'fg': '"""blue"""'}), "(text, fg='blue')\n", (15202, 15219), False, 'import click\n'), ((15280, 15309), 'click.style', 'click.style', (['text'], {'fg': '"""green"""'}), "(text, fg='green')\n", (15291, 15309), False, 'import click\n'), ((4130, 4176), 'json.dumps', 'json.dumps', (['assertion.metadata'], {'sort_keys': '(True)'}), '(assertion.metadata, sort_keys=True)\n', (4140, 4176), False, 'import json\n')] |
# Generated by Django 2.2 on 2020-11-19 20:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('paper', '0069_remove_paper_sift_risk_score'),
]
operations = [
migrations.AddField(
model_name='paper',
name='is_removed_by_user',
field=models.BooleanField(default=False, help_text='Hides the paper because it is not allowed.'),
),
]
| [
"django.db.models.BooleanField"
] | [((351, 446), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Hides the paper because it is not allowed."""'}), "(default=False, help_text=\n 'Hides the paper because it is not allowed.')\n", (370, 446), False, 'from django.db import migrations, models\n')] |
import pickle
from multiprocessing.pool import ThreadPool as Pool
import utils
with open('file.pkl', 'rb') as file:
myvar = pickle.load(file)
print(len(myvar["GLOBAL_absolute_download_url"]))
def download(absolute_download_url, downloaded_file_path, auth, headers, verify_peer_certificate, proxies):
try:
utils.http_download_binary_file(absolute_download_url, downloaded_file_path, auth, headers, verify_peer_certificate, proxies)
except utils.ConfluenceException as e:
if error_output:
error_print('%sERROR: %s' % ('\t'*(depth+2), e))
else:
print('%sWARNING: %s' % ('\t'*(depth+2), e))
with Pool(processes=1000) as pool:
pool.starmap(utils.http_download_binary_file, zip(
myvar["GLOBAL_absolute_download_url"],
myvar["GLOBAL_downloaded_file_path"],
myvar["GLOBAL_auth"],
myvar["GLOBAL_headers"],
myvar["GLOBAL_verify_peer_certificate"],
myvar["GLOBAL_proxies"]
)) | [
"multiprocessing.pool.ThreadPool",
"pickle.load",
"utils.http_download_binary_file"
] | [((130, 147), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (141, 147), False, 'import pickle\n'), ((660, 680), 'multiprocessing.pool.ThreadPool', 'Pool', ([], {'processes': '(1000)'}), '(processes=1000)\n', (664, 680), True, 'from multiprocessing.pool import ThreadPool as Pool\n'), ((328, 457), 'utils.http_download_binary_file', 'utils.http_download_binary_file', (['absolute_download_url', 'downloaded_file_path', 'auth', 'headers', 'verify_peer_certificate', 'proxies'], {}), '(absolute_download_url, downloaded_file_path,\n auth, headers, verify_peer_certificate, proxies)\n', (359, 457), False, 'import utils\n')] |
import markdown2
import re
import os
from os.path import splitext
link_patterns = [(re.compile(
r'((([A-Za-z]{3,9}:(?:\/\/)?)(?:[\-;:&=\+\$,\w]+@)?[A-Za-z0-9\.\-]+(:[0-9]+)?|(?:www\.|[\-;:&=\+\$,\w]+@)[A-Za-z0-9\.\-]+)((?:\/[\+~%\/\.\w\-_]*)?\??(?:[\-\+=&;%@\.\w_]*)#?(?:[\.\!\/\\\w]*))?)'), r'\1')]
head = "<!DOCTYPE html>\n<html>\n<head>\n\t<meta charset='utf-8'/>\n\t<title>" + "Marked" + \
"</title>\n\t\n</head>\n<body>\n"
finHead = "</body>\n</html>"
def convert(md_input, html_output):
malist = os.listdir(f'./'+md_input)
for i in malist:
f = open(f'./{md_input}/{i}', "r")
html = markdown2.markdown(
f.read(), extras=["link-patterns"], link_patterns=link_patterns)
nomFichier = os.path.splitext(i)[0]
print(f'Le fichier "{nomFichier}" est convertie !')
html_file = open(f'./{html_output}/{nomFichier}.html', 'w')
html_file.write(head)
html_file.write(html)
html_file.write(finHead)
"""
<?php
$command = escapeshellcmd('/usr/custom/test.py');
$output = shell_exec($command);
echo $output;
?>
""" | [
"os.listdir",
"os.path.splitext",
"re.compile"
] | [((518, 546), 'os.listdir', 'os.listdir', (["(f'./' + md_input)"], {}), "(f'./' + md_input)\n", (528, 546), False, 'import os\n'), ((86, 332), 're.compile', 're.compile', (['"""((([A-Za-z]{3,9}:(?:\\\\/\\\\/)?)(?:[\\\\-;:&=\\\\+\\\\$,\\\\w]+@)?[A-Za-z0-9\\\\.\\\\-]+(:[0-9]+)?|(?:www\\\\.|[\\\\-;:&=\\\\+\\\\$,\\\\w]+@)[A-Za-z0-9\\\\.\\\\-]+)((?:\\\\/[\\\\+~%\\\\/\\\\.\\\\w\\\\-_]*)?\\\\??(?:[\\\\-\\\\+=&;%@\\\\.\\\\w_]*)#?(?:[\\\\.\\\\!\\\\/\\\\\\\\\\\\w]*))?)"""'], {}), "(\n '((([A-Za-z]{3,9}:(?:\\\\/\\\\/)?)(?:[\\\\-;:&=\\\\+\\\\$,\\\\w]+@)?[A-Za-z0-9\\\\.\\\\-]+(:[0-9]+)?|(?:www\\\\.|[\\\\-;:&=\\\\+\\\\$,\\\\w]+@)[A-Za-z0-9\\\\.\\\\-]+)((?:\\\\/[\\\\+~%\\\\/\\\\.\\\\w\\\\-_]*)?\\\\??(?:[\\\\-\\\\+=&;%@\\\\.\\\\w_]*)#?(?:[\\\\.\\\\!\\\\/\\\\\\\\\\\\w]*))?)'\n )\n", (96, 332), False, 'import re\n'), ((742, 761), 'os.path.splitext', 'os.path.splitext', (['i'], {}), '(i)\n', (758, 761), False, 'import os\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
import torch.optim as optim
import pre_process as pp
inputs, outputs = pp.get_data()
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.layer1 = nn.Linear(6, 6)
self.layer2 = nn.Linear(6, 12)
self.layer3 = nn.Linear(12, 12)
self.layer4 = nn.Linear(12, 12)
self.layer5 = nn.Linear(12, 6)
self.layer6 = nn.Linear(6, 18)
def forward(self, inputs):
hidden_neurons = F.relu(self.layer1(inputs))
hidden_neurons = F.relu(self.layer2(hidden_neurons))
hidden_neurons = F.relu(self.layer3(hidden_neurons))
hidden_neurons = F.relu(self.layer4(hidden_neurons))
hidden_neurons = F.relu(self.layer5(hidden_neurons))
output_neurons = F.softmax(self.layer6(hidden_neurons))
return output_neurons
model1 = Model()
model2 = Model()
model1.cuda()
model2.cuda()
try:
model1.load_state_dict(torch.load('model1'))
model2.load_state_dict(torch.load('model2'))
except Exception as e:
print(e)
tensor = autograd.Variable(torch.cuda.FloatTensor(inputs))
target = autograd.Variable(torch.cuda.FloatTensor(outputs))
criterion = nn.MSELoss()
optimizer1 = optim.Adam(model1.parameters(), lr=0.001)
optimizer2 = optim.Adam(model2.parameters(), lr=0.001)
for i in range(100000):
optimizer1.zero_grad()
output1 = model1(tensor)
print(output1[1])
loss1 = criterion(output1, target)
loss1.backward()
optimizer1.step()
optimizer2.zero_grad()
output2 = model2(tensor)
print(output2[1])
loss2 = criterion(output2, target)
loss2.backward()
optimizer2.step()
torch.save(model1.state_dict(), 'model1')
torch.save(model2.state_dict(), 'model2')
| [
"torch.cuda.FloatTensor",
"torch.load",
"torch.nn.MSELoss",
"torch.nn.Linear",
"pre_process.get_data"
] | [((175, 188), 'pre_process.get_data', 'pp.get_data', ([], {}), '()\n', (186, 188), True, 'import pre_process as pp\n'), ((1275, 1287), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1285, 1287), True, 'import torch.nn as nn\n'), ((1169, 1199), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['inputs'], {}), '(inputs)\n', (1191, 1199), False, 'import torch\n'), ((1229, 1260), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['outputs'], {}), '(outputs)\n', (1251, 1260), False, 'import torch\n'), ((301, 316), 'torch.nn.Linear', 'nn.Linear', (['(6)', '(6)'], {}), '(6, 6)\n', (310, 316), True, 'import torch.nn as nn\n'), ((339, 355), 'torch.nn.Linear', 'nn.Linear', (['(6)', '(12)'], {}), '(6, 12)\n', (348, 355), True, 'import torch.nn as nn\n'), ((378, 395), 'torch.nn.Linear', 'nn.Linear', (['(12)', '(12)'], {}), '(12, 12)\n', (387, 395), True, 'import torch.nn as nn\n'), ((418, 435), 'torch.nn.Linear', 'nn.Linear', (['(12)', '(12)'], {}), '(12, 12)\n', (427, 435), True, 'import torch.nn as nn\n'), ((458, 474), 'torch.nn.Linear', 'nn.Linear', (['(12)', '(6)'], {}), '(12, 6)\n', (467, 474), True, 'import torch.nn as nn\n'), ((497, 513), 'torch.nn.Linear', 'nn.Linear', (['(6)', '(18)'], {}), '(6, 18)\n', (506, 513), True, 'import torch.nn as nn\n'), ((1034, 1054), 'torch.load', 'torch.load', (['"""model1"""'], {}), "('model1')\n", (1044, 1054), False, 'import torch\n'), ((1083, 1103), 'torch.load', 'torch.load', (['"""model2"""'], {}), "('model2')\n", (1093, 1103), False, 'import torch\n')] |
from models import MobiNetV3
from models.Core import Config
import os
from glob import glob
config = Config()
model = MobiNetV3.FastModel(config)
config.WEIGHTS_FILE = "weights/MobiNetV3/weight-0.73.h5"
if __name__ == "__main__":
#model.test_image("FastDetector/datasets/10_rosbag/images/1565608339175915704.jpg")
model.test_images(glob("FastDetector/datasets/10_rosbag/images/*.jpg"), skip=100) | [
"models.Core.Config",
"models.MobiNetV3.FastModel",
"glob.glob"
] | [((101, 109), 'models.Core.Config', 'Config', ([], {}), '()\n', (107, 109), False, 'from models.Core import Config\n'), ((118, 145), 'models.MobiNetV3.FastModel', 'MobiNetV3.FastModel', (['config'], {}), '(config)\n', (137, 145), False, 'from models import MobiNetV3\n'), ((342, 394), 'glob.glob', 'glob', (['"""FastDetector/datasets/10_rosbag/images/*.jpg"""'], {}), "('FastDetector/datasets/10_rosbag/images/*.jpg')\n", (346, 394), False, 'from glob import glob\n')] |
#!/usr/bin/env python
from wand.image import Image
from wand.drawing import Drawing
from wand.color import Color
# http://www.imagemagick.org/Usage/draw/#colorspace
# original imagemagick command:
# convert -size 81x81 xc:black -fill white -draw 'circle 40,40 40,3' \
# circle_raw.png
#
# convert -size 81x81 xc:black -fill white -draw 'circle 40,40 40,3' \
# -gamma 2.2 circle_gamma.png
#
# convert -size 81x81 xc:black -set colorspace RGB \
# -fill white -draw 'circle 40,40 40,3' \
# -colorspace sRGB circle_sRGB.png
w = 81
h = 81
bgcolor = Color('black')
with Image(width=w, height=h, background=bgcolor) as img:
with Drawing() as draw:
draw.fill_color = Color('white')
draw.circle((40, 40), (40, 3))
draw(img)
img.save(filename='sample13a.png')
with Image(width=w, height=h, background=bgcolor) as img:
with Drawing() as draw:
draw.fill_color = Color('white')
draw.circle((40, 40), (40, 3))
draw(img)
img.gamma(2.2)
img.save(filename='sample13b.png')
with Image(width=w, height=h, background=bgcolor) as img:
img.colorspace = 'rgb'
with Drawing() as draw:
draw.fill_color = Color('white')
draw.circle((40, 40), (40, 3))
draw(img)
img.colorspace = 'srgb'
img.save(filename='sample13c.png')
| [
"wand.image.Image",
"wand.drawing.Drawing",
"wand.color.Color"
] | [((580, 594), 'wand.color.Color', 'Color', (['"""black"""'], {}), "('black')\n", (585, 594), False, 'from wand.color import Color\n'), ((601, 645), 'wand.image.Image', 'Image', ([], {'width': 'w', 'height': 'h', 'background': 'bgcolor'}), '(width=w, height=h, background=bgcolor)\n', (606, 645), False, 'from wand.image import Image\n'), ((825, 869), 'wand.image.Image', 'Image', ([], {'width': 'w', 'height': 'h', 'background': 'bgcolor'}), '(width=w, height=h, background=bgcolor)\n', (830, 869), False, 'from wand.image import Image\n'), ((1068, 1112), 'wand.image.Image', 'Image', ([], {'width': 'w', 'height': 'h', 'background': 'bgcolor'}), '(width=w, height=h, background=bgcolor)\n', (1073, 1112), False, 'from wand.image import Image\n'), ((663, 672), 'wand.drawing.Drawing', 'Drawing', ([], {}), '()\n', (670, 672), False, 'from wand.drawing import Drawing\n'), ((708, 722), 'wand.color.Color', 'Color', (['"""white"""'], {}), "('white')\n", (713, 722), False, 'from wand.color import Color\n'), ((887, 896), 'wand.drawing.Drawing', 'Drawing', ([], {}), '()\n', (894, 896), False, 'from wand.drawing import Drawing\n'), ((932, 946), 'wand.color.Color', 'Color', (['"""white"""'], {}), "('white')\n", (937, 946), False, 'from wand.color import Color\n'), ((1157, 1166), 'wand.drawing.Drawing', 'Drawing', ([], {}), '()\n', (1164, 1166), False, 'from wand.drawing import Drawing\n'), ((1202, 1216), 'wand.color.Color', 'Color', (['"""white"""'], {}), "('white')\n", (1207, 1216), False, 'from wand.color import Color\n')] |
import unittest
from src.lambda_ackermann_recursive import ackermann
import test.ackermann.basetests_ackermann as BT
def call_function(a: int, b: int) -> int:
return ackermann(a, b)
class Test_ValidInput(unittest.TestCase, BT.ValidInput):
def base_function(self, a, b):
return call_function(a, b)
class Test_ExceptionHandling(unittest.TestCase, BT.ExceptionHandling):
def base_function(self, a, b):
return call_function(a, b)
class Test_RunningTime(unittest.TestCase):
def test_time(self):
self.assertNotIsInstance(call_function(3, 4), Exception)
def test_recursion_error(self):
with self.assertRaises(Exception): call_function(4, 3)
if __name__ == "__main__":
unittest.main() | [
"unittest.main",
"src.lambda_ackermann_recursive.ackermann"
] | [((172, 187), 'src.lambda_ackermann_recursive.ackermann', 'ackermann', (['a', 'b'], {}), '(a, b)\n', (181, 187), False, 'from src.lambda_ackermann_recursive import ackermann\n'), ((730, 745), 'unittest.main', 'unittest.main', ([], {}), '()\n', (743, 745), False, 'import unittest\n')] |
'''
objective :-
------------
detect and classify shapes and their location in an image with low latency and high accuracy.
it must account for false positives and empty images.
modules used :-
---------------
1 - open cv for image processing tasks.
2 - easyocr for text-recognition tasks.
3 - threading for running text-recognition tasks on a separate thread to reduce latency.
4 - atexit to join all threads on program termination.
5 - Text_Detection which is a manually designed module for text-detection using east detection algorithm.
Inputs :-
---------
1 - captured frame from the camera video stream.
Outputs :-
----------
1 - whether text has been detected or not.
2 - coordinates of text if detected.
3 - an array containing the objects detected and what is the character that this object represents.
Algorithm :-
------------
1 - apply east text-detection on th input frame
2 - if a new character has been detected
a - capture the coordinates of the detected character
b - add a new thread that will be assigned the duty of handling the text-recognition using easy ocr.
c - create an array which contains many copies of the input frame but rotated in different angles.
d - start the thread which will run text-recogntion using the array provided in the previous step.
e - return that text has been detected and return its coordinates
else
return the input frame and that no text has been detected
4 - wait for all threads to finish and join them with the main thread
5 - return an array containing the objects detected and what is the character that this object represents.
'''
from cv2 import cv2
from AlphanumericCharacterDetection.recogniser import Recognize
from os import remove, listdir
import time
import numpy as np
WHITE_LIST = ['A','B','C','c','D','E','F','G','H','I','J','K','k','L','l','M','m','N','O','o','P','p','Q','R','S','s','T','U','u','V','v','W','w','X','x','Y','y','Z','z','0','1','2','3','4','5','6','7','8','9']
def rotate_image(image, angle):
if angle == 0: return image
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
def alphanum_B(image, id):
############################################################################################################## REPLACE [0]
for angle in range(0, 360, 90):
cv2.imwrite("AlphanumericCharacterDetection/results/" + str(id) + "_" + str(angle) + ".jpg", rotate_image(image, angle))
# x= cv2.imwrite("AlphanumericCharacterDetection/results/" + str(id) + ".jpg", image)
############################################################################################################## [0]
out_character = ""
out_confidence = 0
out_character = Recognize("AlphanumericCharacterDetection/results/")
if out_character is None or out_character == '' :
return None,None,None
else:
pass
############################################################################################################## REPLACE [1]
for angle in range(0, 360, 90):
remove("AlphanumericCharacterDetection/results/" + str(id) + "_" + str(angle) + ".jpg")
# remove("AlphanumericCharacterDetection/results/" + str(id) + ".jpg")
############################################################################################################## end [1]
############################################################################################################## UNCOMMENT [2]
out_character = sorted(out_character, key = lambda x: x[1],reverse=True) # sort by confidence
############### special cases ##############
# we prefer M, T, C, 4, 3 than other chars #
############################################
preferred = ['M','T','C','4', '3']
for i in preferred:
if out_character[0] == i:
return out_character[0]
for i in range(len(out_character)):
if out_character[i][0] in preferred:
temp = list(out_character[i])
temp[1] += 0.1
out_character[i] += tuple(temp)
out_character = sorted(out_character, key = lambda x: x[1],reverse=True) # sort again by confidence
out_character = out_character[0]
############################################################################################################# [2]
return out_character
if __name__ == '__main__':
image = cv2.imread("Sample10.jpg")
timer = time.perf_counter()
character = alphanum_B(image, 1)
print(time.perf_counter()-timer)
print(character)
| [
"cv2.cv2.getRotationMatrix2D",
"cv2.cv2.warpAffine",
"cv2.cv2.imread",
"time.perf_counter",
"AlphanumericCharacterDetection.recogniser.Recognize",
"numpy.array"
] | [((2094, 2143), 'cv2.cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['image_center', 'angle', '(1.0)'], {}), '(image_center, angle, 1.0)\n', (2117, 2143), False, 'from cv2 import cv2\n'), ((2154, 2228), 'cv2.cv2.warpAffine', 'cv2.warpAffine', (['image', 'rot_mat', 'image.shape[1::-1]'], {'flags': 'cv2.INTER_LINEAR'}), '(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\n', (2168, 2228), False, 'from cv2 import cv2\n'), ((2843, 2895), 'AlphanumericCharacterDetection.recogniser.Recognize', 'Recognize', (['"""AlphanumericCharacterDetection/results/"""'], {}), "('AlphanumericCharacterDetection/results/')\n", (2852, 2895), False, 'from AlphanumericCharacterDetection.recogniser import Recognize\n'), ((4617, 4643), 'cv2.cv2.imread', 'cv2.imread', (['"""Sample10.jpg"""'], {}), "('Sample10.jpg')\n", (4627, 4643), False, 'from cv2 import cv2\n'), ((4654, 4673), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4671, 4673), False, 'import time\n'), ((2049, 2077), 'numpy.array', 'np.array', (['image.shape[1::-1]'], {}), '(image.shape[1::-1])\n', (2057, 2077), True, 'import numpy as np\n'), ((4715, 4734), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4732, 4734), False, 'import time\n')] |
# -*- coding: utf-8 -*-
from datetime import datetime
from django.conf import settings
from haystack.indexes import Indexable
from opps.containers.search_indexes import ContainerIndex
from .models import Post, Album, Link
migration_date = getattr(settings, 'MIGRATION_DATE', None)
if migration_date:
m_date = datetime.strptime(migration_date, "%Y-%m-%d").date()
Post.is_legacy = lambda self: m_date >= self.date_insert.date()
else:
Post.is_legacy = lambda self: False
class PostIndex(ContainerIndex, Indexable):
def get_model(self):
return Post
class AlbumIndex(ContainerIndex, Indexable):
def get_model(self):
return Album
class LinkIndex(ContainerIndex, Indexable):
def get_model(self):
return Link
| [
"datetime.datetime.strptime"
] | [((320, 365), 'datetime.datetime.strptime', 'datetime.strptime', (['migration_date', '"""%Y-%m-%d"""'], {}), "(migration_date, '%Y-%m-%d')\n", (337, 365), False, 'from datetime import datetime\n')] |
from gbe.forms import ParticipantForm
from django.forms import (
ChoiceField,
MultipleChoiceField,
)
from gbe_forms_text import (
how_heard_options,
participant_labels,
)
from gbetext import (
act_casting_label,
states_options,
)
from scheduler.idd import get_occurrences
from gbe.models import Show
from gbe.views.act_display_functions import get_act_casting
def get_participant_form(profile, prefix='Contact Info'):
participantform = ParticipantForm(
instance=profile,
initial={'email': profile.user_object.email,
'first_name': profile.user_object.first_name,
'last_name': profile.user_object.last_name},
prefix=prefix)
if profile.state:
participantform.fields['state'] = MultipleChoiceField(
choices=[(profile.state,
dict(states_options)[profile.state])],)
else:
participantform.fields['state'] = MultipleChoiceField(
choices=[('--------', 'No State Chosen')],)
how_heard_selected = []
for option in how_heard_options:
if option[0] in profile.how_heard:
how_heard_selected += [option]
if len(how_heard_selected) == 0:
how_heard_selected = [('', ''), ]
participantform.fields['how_heard'] = MultipleChoiceField(
choices=how_heard_selected,
required=False,
label=participant_labels['how_heard'])
return participantform
# used in review flex bid view and the show dashboard, takes a
# base form to play well with the inheritance in bid review
def make_show_casting_form(conference, base_form, start, casting):
choices = []
response = get_occurrences(
foreign_event_ids=Show.objects.filter(
e_conference=conference).values_list('eventitem_id', flat=True))
for occurrence in response.occurrences:
choices += [(occurrence.eventitem.pk, str(occurrence))]
base_form.fields['show'] = ChoiceField(
choices=choices,
label='Pick a Show',
initial=start)
base_form.fields['casting'] = ChoiceField(
choices=get_act_casting(),
required=False,
label=act_casting_label,
initial=casting)
return base_form
| [
"gbe.forms.ParticipantForm",
"django.forms.ChoiceField",
"gbe.views.act_display_functions.get_act_casting",
"gbe.models.Show.objects.filter",
"django.forms.MultipleChoiceField"
] | [((466, 659), 'gbe.forms.ParticipantForm', 'ParticipantForm', ([], {'instance': 'profile', 'initial': "{'email': profile.user_object.email, 'first_name': profile.user_object.\n first_name, 'last_name': profile.user_object.last_name}", 'prefix': 'prefix'}), "(instance=profile, initial={'email': profile.user_object.\n email, 'first_name': profile.user_object.first_name, 'last_name':\n profile.user_object.last_name}, prefix=prefix)\n", (481, 659), False, 'from gbe.forms import ParticipantForm\n'), ((1295, 1402), 'django.forms.MultipleChoiceField', 'MultipleChoiceField', ([], {'choices': 'how_heard_selected', 'required': '(False)', 'label': "participant_labels['how_heard']"}), "(choices=how_heard_selected, required=False, label=\n participant_labels['how_heard'])\n", (1314, 1402), False, 'from django.forms import ChoiceField, MultipleChoiceField\n'), ((1954, 2018), 'django.forms.ChoiceField', 'ChoiceField', ([], {'choices': 'choices', 'label': '"""Pick a Show"""', 'initial': 'start'}), "(choices=choices, label='Pick a Show', initial=start)\n", (1965, 2018), False, 'from django.forms import ChoiceField, MultipleChoiceField\n'), ((946, 1008), 'django.forms.MultipleChoiceField', 'MultipleChoiceField', ([], {'choices': "[('--------', 'No State Chosen')]"}), "(choices=[('--------', 'No State Chosen')])\n", (965, 1008), False, 'from django.forms import ChoiceField, MultipleChoiceField\n'), ((2107, 2124), 'gbe.views.act_display_functions.get_act_casting', 'get_act_casting', ([], {}), '()\n', (2122, 2124), False, 'from gbe.views.act_display_functions import get_act_casting\n'), ((1717, 1761), 'gbe.models.Show.objects.filter', 'Show.objects.filter', ([], {'e_conference': 'conference'}), '(e_conference=conference)\n', (1736, 1761), False, 'from gbe.models import Show\n')] |
from __future__ import annotations
import typing
from ctc import spec
from ctc.protocols import balancer_utils
from .. import analytics_spec
async def async_compute_buybacks(
blocks: list[int], verbose: bool = False
) -> analytics_spec.MetricGroup:
return {
'name': 'Buybacks',
'metrics': {
'buybacks_usd': (await async_compute_tribe_buybacks_usd(blocks)),
},
}
async def async_compute_tribe_buybacks_usd(
blocks: list[int], swaps: typing.Optional[spec.DataFrame] = None
) -> analytics_spec.MetricData:
from ctc.toolbox import pd_utils
# load swaps
if swaps is None:
swaps = await balancer_utils.async_get_pool_swaps(
pool_address='0xc1382fe6e17bcdbc3d35f73f5317fbf261ebeecd'
)
swaps = typing.cast(
spec.DataFrame,
swaps.droplevel('transaction_index').droplevel('log_index'),
)
# filter tribe buys
fei = '0x956f47f50a910163d8bf957cf5846d573e7f87ca'
tribe_buys: typing.Any = swaps[swaps['arg__tokenOut'] == fei] # type: ignore
tribe_buys = tribe_buys['arg__amountOut'].map(float) / 1e18
cummulative_tribe_buys = tribe_buys.cumsum()
# cummulative_tribe_buys = evm.interpolate_block_series(
# start_block=min(blocks),
# pre_fill_value=0,
# series=cummulative_tribe_buys,
# end_block=max(blocks),
# )
cummulative_tribe_buys = pd_utils.interpolate_series(
series=cummulative_tribe_buys,
start_index=min(blocks),
end_index=max(blocks),
pre_fill_value=0,
)
# filter tribe sells
tribe_sells_df = swaps[swaps['arg__tokenIn'] == fei] # type: ignore
if len(tribe_sells_df) > 0:
tribe_sells = tribe_sells_df['arg__amountIn'].map(float) / 1e18
cummulative_tribe_sells = tribe_sells.cumsum()
# cummulative_tribe_sells = evm.interpolate_block_series(
# start_block=min(blocks),
# pre_fill_value=0,
# series=cummulative_tribe_sells,
# end_block=max(blocks),
# )
cummulative_tribe_sells = pd_utils.interpolate_series(
series=cummulative_tribe_sells,
start_index=min(blocks),
end_index=max(blocks),
pre_fill_value=0,
)
net_tribe_buys = cummulative_tribe_buys - cummulative_tribe_sells
else:
net_tribe_buys = cummulative_tribe_buys
return {
'name': 'Buybacks USD',
'values': [net_tribe_buys[block] for block in blocks],
'units': 'FEI',
}
| [
"ctc.protocols.balancer_utils.async_get_pool_swaps"
] | [((662, 761), 'ctc.protocols.balancer_utils.async_get_pool_swaps', 'balancer_utils.async_get_pool_swaps', ([], {'pool_address': '"""0xc1382fe6e17bcdbc3d35f73f5317fbf261ebeecd"""'}), "(pool_address=\n '0xc1382fe6e17bcdbc3d35f73f5317fbf261ebeecd')\n", (697, 761), False, 'from ctc.protocols import balancer_utils\n')] |
from django.contrib import admin
from cameras.models import Camera
# Register your models here.
admin.site.register(Camera)
| [
"django.contrib.admin.site.register"
] | [((97, 124), 'django.contrib.admin.site.register', 'admin.site.register', (['Camera'], {}), '(Camera)\n', (116, 124), False, 'from django.contrib import admin\n')] |
"""This module provides Python bindings for the Discovery API of DGILib."""
from ctypes import byref, create_string_buffer
from pydgilib.dgilib_config import GET_STRING_SIZE
from pydgilib.dgilib_exceptions import DeviceReturnError
class DGILibDiscovery(object):
"""Python bindings for DGILib Discovery.
DGILib is a Dynamic-Link Library (DLL) to help software applications
communicate with Data Gateway Interface (DGI) devices. See the Data
Gateway Interface user guide for further details. DGILib handles
the low-level USB communication and adds a level of buffering for
minimizing the chance of overflows.
TODO?
2.1.1. initialize_status_change_notification
Initializes the system necessary for using the status change notification
callback mechanisms. A handle will be created to keep track of the
registered callbacks. This function must always be called before
registering and unregistering notification callbacks.
Function definition
void initialize_status_change_notification(uint32_t* handlep)
Parameters
handlep Pointer to a variable that will hold the handle
2.1.2. uninitialize_status_change_notification
Uninitializes the status change notification callback mechanisms. This
function must be called when shutting down to clean up memory allocations.
Function definition
void uninitialize_status_change_notification(uint32_t handle)
Parameters
handle Handle to uninitialize
2.1.3. register_for_device_status_change_notifications
Registers provided function pointer with the device status change
mechanism. Whenever there is a change (device connected or disconnected)
the callback will be executed. Note that it is not allowed to connect to a
device in the context of the callback function. The callback function has
the following definition: typedef void (*DeviceStatusChangedCallBack)(char*
device_name, char* device_serial, BOOL connected)
Function definition
void register_for_device_status_change_notifications(uint32_t handle,
DeviceStatusChangedCallBack deviceStatusChangedCallBack)
Parameters
handle Handle to change notification mechanisms
deviceStatusChangedCallBack Function pointer that will be called when the
devices change
2.1.4. unregister_for_device_status_change_notifications
Unregisters previously registered function pointer from the device status
change mechanism.
Function definition
void unregister_for_device_status_change_notifications(uint32_t handle,
DeviceStatusChangedCallBack deviceStatusChangedCallBack)
Parameters
handle Handle to change notification mechanisms
deviceStatusChangedCallBack Function pointer that will be removed
"""
dgilib = None
verbose = None
def discover(self):
"""`discover`.
Triggers a scan to find available devices in the system. The result
will be immediately available through the `get_device_count`,
`get_device_name` and `get_device_serial` functions.
`void discover(void)`
"""
self.dgilib.discover()
def get_device_count(self):
"""`get_device_count`.
Returns the number of devices detected.
`int get_device_count(void)`
:return: The number of devices detected
:rtype: int
"""
device_count = self.dgilib.get_device_count()
if self.verbose:
print(f"device_count: {device_count}")
return device_count
def get_device_name(self, index=0):
"""`get_device_name`.
Gets the name of a detected device.
`int get_device_name(int index, char* name)`
+------------+------------+
| Parameter | Description |
+============+============+
| *index* | Index of device ranges from 0 to `get_device_count` - 1 |
| *name* | Pointer to buffer where name of device can be stored. 100
or more bytes must be allocated |
+------------+------------+
:param index: Index of device ranges from 0 to `get_device_count` - 1
:type index: int
:return: The name of a detected device
:rtype: str
:raises: :exc:`DeviceReturnError`
"""
name = create_string_buffer(GET_STRING_SIZE)
res = self.dgilib.get_device_name(index, byref(name))
if self.verbose:
print(f"\t{res} get_device_name: {name.value}")
if res:
raise DeviceReturnError(f"get_device_name returned: {res}")
return name.value
def get_device_serial(self, index=0):
"""`get_device_serial`.
Gets the serial number of a detected device.
`int get_device_serial(int index, char* sn)`
+------------+------------+
| Parameter | Description |
+============+============+
| *index* | Index of device ranges from 0 to `get_device_count` - 1 |
| *sn* | Pointer to buffer where the serial number of the device can
be stored. 100 or more bytes must be allocated. This is used when
connecting to a device |
+------------+------------+
:param index: Index of device ranges from 0 to `get_device_count` - 1
:type index: int
:return: The serial number of a detected device
:rtype: str
:raises: :exc:`DeviceReturnError`
"""
device_sn = create_string_buffer(GET_STRING_SIZE)
res = self.dgilib.get_device_serial(index, byref(device_sn))
if self.verbose:
print(f"\t{res} get_device_serial: {device_sn.value}")
if res:
raise DeviceReturnError(f"get_device_serial returned: {res}")
return device_sn.value
def is_msd_mode(self, device_sn):
"""`is_msd_mode`.
EDBG devices can be set to a mass storage mode where the DGI is
unavailable. In such cases the device is still detected by DGILib, but
it won't be possible to directly connect to it. This command is used
to check if the device is in such a mode.
A non-zero return value indicates that the mode must be changed by
`set_mode` before proceeding.
`int is_msd_mode(char* sn)`
+------------+------------+
| Parameter | Description |
+============+============+
| *sn* | Serial number of the device to check |
+------------+------------+
:param device_sn: Serial number of the device to check (defaults to
self.device_sn)
:type device_sn: str or None
:return: A non-zero return value indicates that the mode must be
changed by `set_mode` before proceeding.
:rtype: int
"""
msd_mode = self.dgilib.is_msd_mode(device_sn)
if self.verbose:
print(f"msd_mode: {msd_mode}")
return msd_mode
def set_mode(self, device_sn, nmbed=1):
"""`set_mode`.
This function is used to temporarily set the EDBG to a specified mode.
`int set_mode(char* sn, int nmbed)`
+------------+------------+
| Parameter | Description |
+============+============+
| *sn* | Serial number of the device to set |
| *nmbed* | 0 - Set to mbed mode. 1 - Set to DGI mode |
+------------+------------+
:param device_sn: Serial number of the device to set
:type device_sn: str
:param nmbed: 0 - Set to mbed mode. 1 - Set to DGI mode (defaults to
DGI mode)
:type nmbed: int
:raises: :exc:`DeviceReturnError`
"""
res = self.dgilib.set_mode(device_sn, nmbed)
if self.verbose:
print(f"\t{res} set_mode {nmbed}")
if res:
raise DeviceReturnError(f"set_mode returned: {res}")
| [
"ctypes.byref",
"pydgilib.dgilib_exceptions.DeviceReturnError",
"ctypes.create_string_buffer"
] | [((4280, 4317), 'ctypes.create_string_buffer', 'create_string_buffer', (['GET_STRING_SIZE'], {}), '(GET_STRING_SIZE)\n', (4300, 4317), False, 'from ctypes import byref, create_string_buffer\n'), ((5424, 5461), 'ctypes.create_string_buffer', 'create_string_buffer', (['GET_STRING_SIZE'], {}), '(GET_STRING_SIZE)\n', (5444, 5461), False, 'from ctypes import byref, create_string_buffer\n'), ((4367, 4378), 'ctypes.byref', 'byref', (['name'], {}), '(name)\n', (4372, 4378), False, 'from ctypes import byref, create_string_buffer\n'), ((4499, 4552), 'pydgilib.dgilib_exceptions.DeviceReturnError', 'DeviceReturnError', (['f"""get_device_name returned: {res}"""'], {}), "(f'get_device_name returned: {res}')\n", (4516, 4552), False, 'from pydgilib.dgilib_exceptions import DeviceReturnError\n'), ((5513, 5529), 'ctypes.byref', 'byref', (['device_sn'], {}), '(device_sn)\n', (5518, 5529), False, 'from ctypes import byref, create_string_buffer\n'), ((5657, 5712), 'pydgilib.dgilib_exceptions.DeviceReturnError', 'DeviceReturnError', (['f"""get_device_serial returned: {res}"""'], {}), "(f'get_device_serial returned: {res}')\n", (5674, 5712), False, 'from pydgilib.dgilib_exceptions import DeviceReturnError\n'), ((7772, 7818), 'pydgilib.dgilib_exceptions.DeviceReturnError', 'DeviceReturnError', (['f"""set_mode returned: {res}"""'], {}), "(f'set_mode returned: {res}')\n", (7789, 7818), False, 'from pydgilib.dgilib_exceptions import DeviceReturnError\n')] |
import math
def primdist(x):
return max([abs(xj - math.floor(xj + 0.5)) for xj in x])
| [
"math.floor"
] | [((53, 73), 'math.floor', 'math.floor', (['(xj + 0.5)'], {}), '(xj + 0.5)\n', (63, 73), False, 'import math\n')] |
"""
Custom added maze tasks with dense rewards and progressively farther goals
For creating expert demonstrations
"""
from typing import Dict, List, Type, Tuple
import numpy as np
from mujoco_maze.custom_maze_task import (
GoalRewardLargeUMaze,
GoalRewardRoom3x5,
GoalRewardRoom3x10,
)
from mujoco_maze.task_common import (
MazeGoal,
MazeTask,
GREEN,
euc_dist,
RewardThresholdList,
)
class Room3x5(GoalRewardRoom3x5):
INNER_REWARD_SCALING: float = 0.01
PENALTY: float = 0
def __init__(self, scale: float, goal: Tuple[float, float], waypoints=None) -> None:
super().__init__(scale)
self.goals = [MazeGoal(np.array(goal) * scale)]
def reward(self, obs: np.ndarray) -> float:
reward = -self.goals[0].euc_dist(obs) / self.scale
if self.termination(obs):
reward = 100.0
return reward
class Room3x5WayPoint(Room3x5):
def __init__(self, scale: float, goal: Tuple[float, float], waypoints=None) -> None:
super().__init__(scale, goal, waypoints)
self.goals = [MazeGoal(np.array(goal) * scale)]
self.waypoints = []
for waypoint in waypoints:
self.waypoints.append(
MazeGoal(
np.array(waypoint) * scale,
rgb=GREEN,
custom_size=0.1 * scale / 2,
)
)
self.visited = np.zeros(len(self.waypoints), dtype=bool)
self.goal_reward = 1000
self.waypoint_reward = 0
# Precalculate distances b/w waypoints
self.rews = np.zeros(len(self.waypoints) + 1)
self.rews[0] = -euc_dist(self.waypoints[0].pos, [0, 0]) / self.scale
for i in range(1, len(self.waypoints)):
self.rews[i] = (
-euc_dist(self.waypoints[i - 1].pos, self.waypoints[i].pos) / self.scale
)
self.rews[-1] = (
-euc_dist(self.waypoints[-1].pos, self.goals[0].pos) / self.scale
)
def reward(self, obs: np.ndarray) -> float:
# If all waypoints were visited
if self.visited.all():
reward = -self.goals[0].euc_dist(obs) / self.scale
if self.termination(obs):
reward = self.goal_reward
else:
# Choose next waypoint
goal_idx = np.argmax(~self.visited)
# Add all remaining distances
reward = np.sum(self.rews[goal_idx + 1 :])
if self.waypoints[goal_idx].neighbor(obs):
self.visited[goal_idx] = True
reward += self.waypoint_reward
else:
reward += -self.waypoints[goal_idx].euc_dist(obs) / self.scale
return reward
class Room3x10(GoalRewardRoom3x10):
INNER_REWARD_SCALING: float = 0.01
PENALTY: float = 0
def __init__(self, scale: float, goal: Tuple[float, float], waypoints=None) -> None:
super().__init__(scale)
self.goals = [MazeGoal(np.array(goal) * scale)]
def reward(self, obs: np.ndarray) -> float:
reward = -self.goals[0].euc_dist(obs) / self.scale
if self.termination(obs):
reward = 100.0
return reward
class LargeUMaze(GoalRewardLargeUMaze):
INNER_REWARD_SCALING: float = 0.01
PENALTY: float = 0
def __init__(self, scale: float, goal: Tuple[float, float], waypoints=None) -> None:
super().__init__(scale)
self.goals = [MazeGoal(np.array(goal) * scale)]
def reward(self, obs: np.ndarray) -> float:
reward = -self.goals[0].euc_dist(obs) / self.scale
if self.termination(obs):
reward = 100.0
return reward
class ExpertTaskRegistry:
REGISTRY: Dict[str, List[Type[MazeTask]]] = {
"DistRoom3x5_1Goals": Room3x5,
"DistRoom3x5WayPoint_3Goals": Room3x5WayPoint,
"DistRoom3x10_1Goals": Room3x10,
"DistLargeUMaze_2Goals": LargeUMaze,
"DistLargeUMaze_4Goals": LargeUMaze,
}
GOALS = {
"DistRoom3x5_1Goals": [(4, 0)],
"DistRoom3x5WayPoint_3Goals": [(1, 0), (2, 0), (4, 0)],
"DistRoom3x10_1Goals": [(9, 0)],
"DistLargeUMaze_2Goals": [(2, 2), (0, 4)],
"DistLargeUMaze_4Goals": [(2, 1), (2, 2), (2, 3), (0, 4)],
}
REWARD_THRESHOLDS = {
"DistRoom3x5_1Goals": RewardThresholdList([-70], [-70], None),
"DistRoom3x5WayPoint_3Goals": RewardThresholdList(
[-20, -40, 70], [-20, -40, -70], None
),
"DistRoom3x10_1Goals": RewardThresholdList([-70], [-690], None),
"DistLargeUMaze_2Goals": RewardThresholdList([-300, -700], [-50, -100], None),
"DistLargeUMaze_4Goals": RewardThresholdList(
[-200, -400, -600, -800], [-25, -50, -75, -100], None
),
}
@staticmethod
def keys() -> List[str]:
return list(ExpertTaskRegistry.REGISTRY.keys())
@staticmethod
def tasks(key: str) -> List[Type[MazeTask]]:
return ExpertTaskRegistry.REGISTRY[key]
@staticmethod
def goals(key: str) -> List[Type[MazeTask]]:
return ExpertTaskRegistry.GOALS[key]
@staticmethod
def reward_thresholds(key: str) -> List[Type[MazeTask]]:
return ExpertTaskRegistry.REWARD_THRESHOLDS[key]
| [
"mujoco_maze.task_common.euc_dist",
"mujoco_maze.task_common.RewardThresholdList",
"numpy.argmax",
"numpy.sum",
"numpy.array"
] | [((4318, 4357), 'mujoco_maze.task_common.RewardThresholdList', 'RewardThresholdList', (['[-70]', '[-70]', 'None'], {}), '([-70], [-70], None)\n', (4337, 4357), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((4397, 4455), 'mujoco_maze.task_common.RewardThresholdList', 'RewardThresholdList', (['[-20, -40, 70]', '[-20, -40, -70]', 'None'], {}), '([-20, -40, 70], [-20, -40, -70], None)\n', (4416, 4455), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((4510, 4550), 'mujoco_maze.task_common.RewardThresholdList', 'RewardThresholdList', (['[-70]', '[-690]', 'None'], {}), '([-70], [-690], None)\n', (4529, 4550), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((4585, 4637), 'mujoco_maze.task_common.RewardThresholdList', 'RewardThresholdList', (['[-300, -700]', '[-50, -100]', 'None'], {}), '([-300, -700], [-50, -100], None)\n', (4604, 4637), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((4672, 4746), 'mujoco_maze.task_common.RewardThresholdList', 'RewardThresholdList', (['[-200, -400, -600, -800]', '[-25, -50, -75, -100]', 'None'], {}), '([-200, -400, -600, -800], [-25, -50, -75, -100], None)\n', (4691, 4746), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((2338, 2362), 'numpy.argmax', 'np.argmax', (['(~self.visited)'], {}), '(~self.visited)\n', (2347, 2362), True, 'import numpy as np\n'), ((2426, 2458), 'numpy.sum', 'np.sum', (['self.rews[goal_idx + 1:]'], {}), '(self.rews[goal_idx + 1:])\n', (2432, 2458), True, 'import numpy as np\n'), ((1656, 1695), 'mujoco_maze.task_common.euc_dist', 'euc_dist', (['self.waypoints[0].pos', '[0, 0]'], {}), '(self.waypoints[0].pos, [0, 0])\n', (1664, 1695), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((1928, 1979), 'mujoco_maze.task_common.euc_dist', 'euc_dist', (['self.waypoints[-1].pos', 'self.goals[0].pos'], {}), '(self.waypoints[-1].pos, self.goals[0].pos)\n', (1936, 1979), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((671, 685), 'numpy.array', 'np.array', (['goal'], {}), '(goal)\n', (679, 685), True, 'import numpy as np\n'), ((1090, 1104), 'numpy.array', 'np.array', (['goal'], {}), '(goal)\n', (1098, 1104), True, 'import numpy as np\n'), ((1803, 1861), 'mujoco_maze.task_common.euc_dist', 'euc_dist', (['self.waypoints[i - 1].pos', 'self.waypoints[i].pos'], {}), '(self.waypoints[i - 1].pos, self.waypoints[i].pos)\n', (1811, 1861), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((2981, 2995), 'numpy.array', 'np.array', (['goal'], {}), '(goal)\n', (2989, 2995), True, 'import numpy as np\n'), ((3454, 3468), 'numpy.array', 'np.array', (['goal'], {}), '(goal)\n', (3462, 3468), True, 'import numpy as np\n'), ((1259, 1277), 'numpy.array', 'np.array', (['waypoint'], {}), '(waypoint)\n', (1267, 1277), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import argparse
import json
import logging
import socket
import socketserver
import time
from collections import deque
from queue import Queue
from threading import Thread
import Ice
logger = logging.getLogger(__name__)
def retrieve_server_state():
try:
import mice3 as mice
server = mice.s
users = server.getUsers()
except Ice.SocketException:
return {"error": "offline"}
final_users = {}
for session_id, user in users.items():
final_user = {
'name': user.name
}
if user.deaf or user.selfDeaf:
final_user['status'] = 'deaf'
elif user.mute or user.selfMute or user.suppress:
final_user['status'] = 'mute'
final_users[session_id] = final_user
return {
"users": final_users
}
def create_request_handler(client_queues, max_interval):
class RequestHandler(socketserver.StreamRequestHandler):
disable_nagle_algorithm = True
def setup(self):
super().setup()
self.queue = Queue()
client_queues.append(self.queue)
def finish(self):
super().finish()
client_queues.remove(self.queue)
def handle(self):
message = {'params': {
'max_interval': max_interval,
}}
self._send_message(message)
state = retrieve_server_state()
while True:
self._send_message(state)
state = self.queue.get()
def _send_message(self, message):
data = json.dumps(message)
self.wfile.write(data.encode() + b'\n')
return RequestHandler
class TCPServer(socketserver.ThreadingTCPServer):
address_family = socket.AF_INET6
allow_reuse_address = True
def handle_error(self, request, client_address):
logging.debug("User disconnected: %s", client_address)
def mumble_thread(queues, max_interval, polling_interval):
max_skips = max_interval // polling_interval
state = None
skip_count = 0
while True:
old_state = state
state = retrieve_server_state()
if skip_count < max_skips and state == old_state:
skip_count += 1
else:
skip_count = 0
for q in queues.copy():
q.put(state)
time.sleep(polling_interval)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--host', default='::',
help="Listening address"
)
parser.add_argument('-p', '--port', type=int, default=43223,
help="Listening port"
)
parser.add_argument(
'-i', '--interval', type=float, default=0.5,
help="Interval between polls (in seconds)",
)
parser.add_argument(
'--max', type=float, default=30,
help="Maximum interval in seconds between updates (keep-alive interval)",
)
parser.add_argument('-d', '--debug', action="store_true")
args = parser.parse_args()
logging.basicConfig(
level=logging.DEBUG if args.debug else logging.INFO,
format='%(message)s'
)
client_queues = deque()
thread = Thread(
target=mumble_thread,
name="mumble_thread",
args=(client_queues, args.max, args.interval)
)
thread.start()
request_handler = create_request_handler(
client_queues, args.max
)
with TCPServer((args.host, args.port), request_handler) as server:
logging.info(
f"Listening for connections on [{args.host}]:{args.port}"
)
server.serve_forever()
| [
"logging.getLogger",
"logging.basicConfig",
"collections.deque",
"logging.debug",
"argparse.ArgumentParser",
"json.dumps",
"time.sleep",
"threading.Thread",
"queue.Queue",
"logging.info"
] | [((219, 246), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (236, 246), False, 'import logging\n'), ((2448, 2473), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2471, 2473), False, 'import argparse\n'), ((3056, 3154), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '(logging.DEBUG if args.debug else logging.INFO)', 'format': '"""%(message)s"""'}), "(level=logging.DEBUG if args.debug else logging.INFO,\n format='%(message)s')\n", (3075, 3154), False, 'import logging\n'), ((3194, 3201), 'collections.deque', 'deque', ([], {}), '()\n', (3199, 3201), False, 'from collections import deque\n'), ((3216, 3317), 'threading.Thread', 'Thread', ([], {'target': 'mumble_thread', 'name': '"""mumble_thread"""', 'args': '(client_queues, args.max, args.interval)'}), "(target=mumble_thread, name='mumble_thread', args=(client_queues,\n args.max, args.interval))\n", (3222, 3317), False, 'from threading import Thread\n'), ((1894, 1948), 'logging.debug', 'logging.debug', (['"""User disconnected: %s"""', 'client_address'], {}), "('User disconnected: %s', client_address)\n", (1907, 1948), False, 'import logging\n'), ((2377, 2405), 'time.sleep', 'time.sleep', (['polling_interval'], {}), '(polling_interval)\n', (2387, 2405), False, 'import time\n'), ((3527, 3598), 'logging.info', 'logging.info', (['f"""Listening for connections on [{args.host}]:{args.port}"""'], {}), "(f'Listening for connections on [{args.host}]:{args.port}')\n", (3539, 3598), False, 'import logging\n'), ((1082, 1089), 'queue.Queue', 'Queue', ([], {}), '()\n', (1087, 1089), False, 'from queue import Queue\n'), ((1613, 1632), 'json.dumps', 'json.dumps', (['message'], {}), '(message)\n', (1623, 1632), False, 'import json\n')] |
import cv2
import numpy as np
img1 = cv2.imread('3D-Matplotlib.png')
img2 = cv2.imread('mainlogo.png')
# THREE DIFFERENT WAYS OF ADDING TWO PICTURE
#1
#add = img1+ img2
#2
#img = cv2.add(img1,img2) # USING BUILT IN FUNCTION OF CV2 TO ADD TWO IMAGES
#3
#weighted_add = cv2.addWeighted(img1, 0.6, img2, 0.4, 0) # 60% OF WEIGHT AND 40% WEIGHT OF IMG1 1ND IMG2 WILL BE ADDED ANG GAMMA VALUE IS ZERO
# MASKING
rows, cols, channels = img2.shape
roi = img1[0:rows, 0:cols]
img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 220,255,cv2.THRESH_BINARY_INV)
mask_inv = cv2.bitwise_not(mask)
# Now black-out the area of logo in ROI
img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)
# Take only region of logo from logo image.
img2_fg = cv2.bitwise_and(img2,img2,mask = mask)
dst = cv2.add(img1_bg,img2_fg)
img1[0:rows, 0:cols ] = dst
cv2.imshow('res',img1)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"cv2.threshold",
"cv2.bitwise_and",
"cv2.imshow",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.bitwise_not",
"cv2.imread",
"cv2.add"
] | [((41, 72), 'cv2.imread', 'cv2.imread', (['"""3D-Matplotlib.png"""'], {}), "('3D-Matplotlib.png')\n", (51, 72), False, 'import cv2\n'), ((81, 107), 'cv2.imread', 'cv2.imread', (['"""mainlogo.png"""'], {}), "('mainlogo.png')\n", (91, 107), False, 'import cv2\n'), ((505, 543), 'cv2.cvtColor', 'cv2.cvtColor', (['img2', 'cv2.COLOR_BGR2GRAY'], {}), '(img2, cv2.COLOR_BGR2GRAY)\n', (517, 543), False, 'import cv2\n'), ((557, 613), 'cv2.threshold', 'cv2.threshold', (['img2gray', '(220)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(img2gray, 220, 255, cv2.THRESH_BINARY_INV)\n', (570, 613), False, 'import cv2\n'), ((624, 645), 'cv2.bitwise_not', 'cv2.bitwise_not', (['mask'], {}), '(mask)\n', (639, 645), False, 'import cv2\n'), ((700, 740), 'cv2.bitwise_and', 'cv2.bitwise_and', (['roi', 'roi'], {'mask': 'mask_inv'}), '(roi, roi, mask=mask_inv)\n', (715, 740), False, 'import cv2\n'), ((799, 837), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img2', 'img2'], {'mask': 'mask'}), '(img2, img2, mask=mask)\n', (814, 837), False, 'import cv2\n'), ((847, 872), 'cv2.add', 'cv2.add', (['img1_bg', 'img2_fg'], {}), '(img1_bg, img2_fg)\n', (854, 872), False, 'import cv2\n'), ((904, 927), 'cv2.imshow', 'cv2.imshow', (['"""res"""', 'img1'], {}), "('res', img1)\n", (914, 927), False, 'import cv2\n'), ((928, 942), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (939, 942), False, 'import cv2\n'), ((944, 967), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (965, 967), False, 'import cv2\n')] |
from typing import Optional, Dict, List, Any
from itertools import product
from paper.experiments.global_variables import TRAINING_DATASET_FRACTIONS
########################################################################################################################
# 3. APP.G.2 (Variants: Cool-down epochs)
########################################################################################################################
def experiments_variants_cooldown(exp_filter: Optional[Dict[str, float]] = None) -> List[Dict[str, Any]]:
exp = [
{
"experiment_name": f"exp_3B_c{c}_x{x}_l{lr_cooldown_epochs}",
"a": "adaptive",
"s": "bio",
"c": c,
"x": x,
"prune_ratio_val": x,
"lr_cooldown_epochs": lr_cooldown_epochs,
}
for c, x, lr_cooldown_epochs in product(["II", "III", "IV"], TRAINING_DATASET_FRACTIONS, [5, 9])
if ("c" not in exp_filter or c == exp_filter["c"])
and ("x" not in exp_filter or x == exp_filter["x"])
]
print(f"3. EXPERIMENTS_VARIANTS_COOLDOWN: {len(exp)} = "
f"3 x {len(TRAINING_DATASET_FRACTIONS)} x 2")
return exp
| [
"itertools.product"
] | [((868, 932), 'itertools.product', 'product', (["['II', 'III', 'IV']", 'TRAINING_DATASET_FRACTIONS', '[5, 9]'], {}), "(['II', 'III', 'IV'], TRAINING_DATASET_FRACTIONS, [5, 9])\n", (875, 932), False, 'from itertools import product\n')] |
from rest_framework import exceptions
from rest_framework import status
from rest_framework.response import Response
from django.core.exceptions import PermissionDenied as DjangoPermissionDenied
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from django.http import Http404
class BaseException(exceptions.APIException):
status_code = status.HTTP_400_BAD_REQUEST
default_detail = _("Unexpected error")
def __init__(self, detail=None):
self.detail = detail or self.default_detail
class NotFound(BaseException, Http404):
"""
Exception used for not found objects.
"""
status_code = status.HTTP_404_NOT_FOUND
default_detail = _("Not found.")
class NotSupported(BaseException):
status_code = status.HTTP_405_METHOD_NOT_ALLOWED
default_detail = _("Method not supported for this endpoint.")
class BadRequest(BaseException):
"""
Exception used on bad arguments detected
on api view.
"""
default_detail = _("Wrong arguments.")
class WrongArguments(BadRequest):
"""
Exception used on bad arguments detected
on service. This is same as `BadRequest`.
"""
default_detail = _("Wrong arguments.")
class RequestValidationError(BadRequest):
default_detail = _("Data validation error")
class PermissionDenied(exceptions.PermissionDenied):
"""
Compatibility subclass of restframework `PermissionDenied`
exception.
"""
pass
class IntegrityError(BadRequest):
default_detail = _("Integrity Error for wrong or invalid arguments")
class PreconditionError(BadRequest):
"""
Error raised on precondition method on viewset.
"""
default_detail = _("Precondition error")
class NotAuthenticated(exceptions.NotAuthenticated):
"""
Compatibility subclass of restframework `NotAuthenticated`
exception.
"""
pass
def format_exception(exc):
if isinstance(exc.detail, (dict, list, tuple,)):
detail = exc.detail
else:
class_name = exc.__class__.__name__
class_module = exc.__class__.__module__
detail = {
"_error_message": force_text(exc.detail),
"_error_type": "{0}.{1}".format(class_module, class_name)
}
return detail
def exception_handler(exc):
"""
Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's builtin `Http404` and `PermissionDenied` exceptions.
Any unhandled exceptions may return `None`, which will cause a 500 error
to be raised.
"""
if isinstance(exc, exceptions.APIException):
headers = {}
if getattr(exc, "auth_header", None):
headers["WWW-Authenticate"] = exc.auth_header
if getattr(exc, "wait", None):
headers["X-Throttle-Wait-Seconds"] = "%d" % exc.wait
detail = format_exception(exc)
return Response(detail, status=exc.status_code, headers=headers)
elif isinstance(exc, Http404):
return Response({'_error_message': str(exc)},
status=status.HTTP_404_NOT_FOUND)
elif isinstance(exc, DjangoPermissionDenied):
return Response({"_error_message": str(exc)},
status=status.HTTP_403_FORBIDDEN)
# Note: Unhandled exceptions will raise a 500 error.
return None
class DependencyError(Exception):
"""
Unmet dependency
"""
pass | [
"django.utils.encoding.force_text",
"rest_framework.response.Response",
"django.utils.translation.ugettext_lazy"
] | [((445, 466), 'django.utils.translation.ugettext_lazy', '_', (['"""Unexpected error"""'], {}), "('Unexpected error')\n", (446, 466), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((723, 738), 'django.utils.translation.ugettext_lazy', '_', (['"""Not found."""'], {}), "('Not found.')\n", (724, 738), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((850, 894), 'django.utils.translation.ugettext_lazy', '_', (['"""Method not supported for this endpoint."""'], {}), "('Method not supported for this endpoint.')\n", (851, 894), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1029, 1050), 'django.utils.translation.ugettext_lazy', '_', (['"""Wrong arguments."""'], {}), "('Wrong arguments.')\n", (1030, 1050), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1215, 1236), 'django.utils.translation.ugettext_lazy', '_', (['"""Wrong arguments."""'], {}), "('Wrong arguments.')\n", (1216, 1236), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1302, 1328), 'django.utils.translation.ugettext_lazy', '_', (['"""Data validation error"""'], {}), "('Data validation error')\n", (1303, 1328), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1544, 1595), 'django.utils.translation.ugettext_lazy', '_', (['"""Integrity Error for wrong or invalid arguments"""'], {}), "('Integrity Error for wrong or invalid arguments')\n", (1545, 1595), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1724, 1747), 'django.utils.translation.ugettext_lazy', '_', (['"""Precondition error"""'], {}), "('Precondition error')\n", (1725, 1747), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2972, 3029), 'rest_framework.response.Response', 'Response', (['detail'], {'status': 'exc.status_code', 'headers': 'headers'}), '(detail, status=exc.status_code, headers=headers)\n', (2980, 3029), False, 'from rest_framework.response import Response\n'), ((2167, 2189), 'django.utils.encoding.force_text', 'force_text', (['exc.detail'], {}), '(exc.detail)\n', (2177, 2189), False, 'from django.utils.encoding import force_text\n')] |
"""Tests for qnaplxdunpriv.py
"""
# pylint: disable=missing-function-docstring
import grp
import os
import pathlib
import pwd
import re
import subprocess
import pytest
from qnaplxdunpriv import FileAclError, set_uids, unset_uids
import qnaplxdunpriv
_TEST_UID = 10000
_TEST_FILE = 'file'
_OWNER_GROUP_RE = re.compile(r'^group::([r-][w-][x-])$', re.MULTILINE)
@pytest.mark.parametrize('mode,user_perms,group_perms', [
('770', 'r-x', 'rwx'), ('640', 'r--', 'r--')])
def test_set_unset_uids_file(uids: list[int], target_file: str, mode: str,
user_perms: str, group_perms: str) -> None:
owner_gid = os.stat(target_file).st_gid
subprocess.run(('chmod', mode, target_file), check=True)
set_uids(target_file, uids)
result = subprocess.run(
('getfacl', '-nps', target_file), capture_output=True, check=True)
for uid in uids:
assert f'user:{uid}:{user_perms}'.encode() in result.stdout
assert f'group:{owner_gid}:{group_perms}'.encode() in result.stdout
unset_uids(target_file, uids)
result = subprocess.run(
('getfacl', '-nps', target_file), capture_output=True, check=True)
assert result.stdout == b''
def test_set_uids_not_exist(tmp_path: pathlib.Path) -> None:
target_file = str(tmp_path / 'not_exist')
with pytest.raises(FileAclError) as excinfo:
set_uids(target_file, [_TEST_UID])
assert 'No such file or directory' in str(excinfo.value)
def test_set_uids_link(target_symlink: str) -> None:
set_uids(target_symlink, [_TEST_UID])
result = subprocess.run(
('getfacl', '-nps', target_symlink), capture_output=True, check=True)
assert result.stdout == b''
def test_set_uids_link_ne(target_symlink_ne: str) -> None:
set_uids(target_symlink_ne, [_TEST_UID])
@pytest.mark.parametrize('mode,user_perms,group_perms', [
('770', 'r-x', 'rwx'), ('640', 'r--', 'r--')])
def test_set_uids_dry_run(
target_file: str, capsys: pytest.CaptureFixture, mode: str,
user_perms: str, group_perms: str) -> None:
owner_gid = os.stat(target_file).st_gid
try:
user_name = pwd.getpwuid(_TEST_UID).pw_name.replace(' ', '\\040')
except KeyError:
user_name = str(_TEST_UID)
try:
group_name = grp.getgrgid(owner_gid).gr_name.replace(' ', '\\040')
except KeyError:
group_name = str(owner_gid)
subprocess.run(('chmod', mode, target_file), check=True)
set_uids(target_file, [_TEST_UID], dry_run=True)
output = capsys.readouterr().out
assert f'# file: {target_file}' in output
assert '# owner: ' in output
assert '# group: ' in output
assert f'user:{user_name}:{user_perms}' in output
assert f'group:{group_name}:{group_perms}' in output
result = subprocess.run(
('getfacl', '-nps', target_file), capture_output=True, check=True)
assert result.stdout == b''
@pytest.mark.parametrize('user', [
[],
['-m', f'user:{_TEST_UID}:r--'],
])
def test_unset_uids_file(target_file: str, user: list[str]) -> None:
owner_gid = os.stat(target_file).st_gid
owner_group_perms = _get_owner_group_perms(target_file)
group_args: list[list[str]] = [
[], ['-m', f'group:{owner_gid}:{owner_group_perms}']]
for group in group_args:
args = user + group
if args:
subprocess.run(['setfacl'] + args + [target_file], check=True)
unset_uids(target_file, [_TEST_UID])
result = subprocess.run(
('getfacl', '-nps', target_file), capture_output=True, check=True)
assert result.stdout == b''
def test_unset_uids_file_2(target_file: str) -> None:
owner_gid = os.stat(target_file).st_gid
subprocess.run((
'setfacl', '-m', f'user:{_TEST_UID}:r--',
'-m', f'user:{_TEST_UID * 2}:r--', '-m', f'group:{owner_gid}:r--',
target_file), check=True)
unset_uids(target_file, [_TEST_UID])
result = subprocess.run(
('getfacl', '-nps', target_file), capture_output=True, check=True)
assert f'user:{_TEST_UID}:r--'.encode() not in result.stdout
assert f'user:{_TEST_UID * 2}:r--'.encode() in result.stdout
assert f'group:{owner_gid}:r--'.encode() in result.stdout
def test_unset_uids_file_different_perms(target_file: str) -> None:
owner_gid = os.stat(target_file).st_gid
owner_group_perms = _get_owner_group_perms(target_file)
group_perms = 'r-x' if owner_group_perms != 'r-x' else 'r--'
subprocess.run((
'setfacl', '-m', f'user:{_TEST_UID}:r--',
'-m', f'group:{owner_gid}:{group_perms}',
target_file), check=True)
unset_uids(target_file, [_TEST_UID])
result = subprocess.run(
('getfacl', '-nps', target_file), capture_output=True, check=True)
assert f'user:{_TEST_UID}:r--'.encode() not in result.stdout
assert f'group:{owner_gid}:{group_perms}'.encode() in result.stdout
def test_unset_uids_default(target_dir: str) -> None:
subprocess.run((
'setfacl', '-m', 'default:user::r--', target_dir), check=True)
unset_uids(target_dir, [_TEST_UID])
result = subprocess.run(
('getfacl', '-nps', target_dir), capture_output=True, check=True)
assert 'default:user::r--'.encode() in result.stdout
def test_unset_uids_not_exist(tmp_path: pathlib.Path) -> None:
target_file = str(tmp_path / 'not_exist')
with pytest.raises(FileAclError) as excinfo:
unset_uids(target_file, [_TEST_UID])
assert 'No such file or directory' in str(excinfo.value)
def test_unset_uids_link(target_symlink: str) -> None:
owner_gid = os.stat(target_symlink).st_gid
subprocess.run((
'setfacl', '-m', f'user:{_TEST_UID}:r--',
'-m', f'group:{owner_gid}:r--', target_symlink), check=True)
before = subprocess.run(
('getfacl', '-nps', target_symlink), capture_output=True, check=True)
unset_uids(target_symlink, [_TEST_UID])
after = subprocess.run(
('getfacl', '-nps', target_symlink), capture_output=True, check=True)
assert before.stdout == after.stdout
def test_unset_uids_link_ne(target_symlink_ne: str) -> None:
unset_uids(target_symlink_ne, [_TEST_UID])
def test_main(tmp_path: pathlib.Path) -> None:
station_root = tmp_path / qnaplxdunpriv.STATION_DEFAULT.lstrip('/')
station_root.mkdir(parents=True)
for dir_name in (qnaplxdunpriv.CONTAINER_STATION_PATHS
+ qnaplxdunpriv.CONTAINER_STATION_RECURSE_PATHS):
_make_files(station_root, dir_name)
container_root = tmp_path / qnaplxdunpriv.CONTAINER_DEFAULT.lstrip('/')
container_root.mkdir()
for dir_name in qnaplxdunpriv.CONTAINER_PATHS:
_make_files(container_root, dir_name)
ret = qnaplxdunpriv.main([
'--station', str(station_root), '--container', str(container_root),
'set', str(_TEST_UID)])
assert ret == 0
for dir_name in qnaplxdunpriv.CONTAINER_STATION_PATHS:
dir_path = station_root / dir_name
_assert_extended_acl(dir_path)
if dir_name:
_assert_base_acl(dir_path / _TEST_FILE)
for dir_name in qnaplxdunpriv.CONTAINER_STATION_RECURSE_PATHS:
dir_path = station_root / dir_name
_assert_extended_acl(dir_path)
if dir_name:
_assert_extended_acl(dir_path / _TEST_FILE)
for dir_name in qnaplxdunpriv.CONTAINER_PATHS:
dir_path = container_root / dir_name
_assert_extended_acl(dir_path)
if dir_name:
_assert_base_acl(dir_path / _TEST_FILE)
ret = qnaplxdunpriv.main([
'--station', str(station_root), '--container', str(container_root),
'unset', str(_TEST_UID)])
assert ret == 0
result = subprocess.run(
('getfacl', '-Rnps', str(tmp_path)), capture_output=True, check=True)
assert result.stdout == b''
def _make_files(root: pathlib.Path, dir_name: str) -> None:
if not dir_name:
return
dir_path = root / dir_name
dir_path.mkdir(parents=True)
file_path = dir_path / _TEST_FILE
file_path.write_text('contents')
def _assert_extended_acl(path: pathlib.Path) -> None:
assert _getfacl(path)
def _assert_base_acl(path: pathlib.Path) -> None:
assert not _getfacl(path)
def _getfacl(path: pathlib.Path) -> bytes:
retult = subprocess.run(
('getfacl', '-nps', str(path)), capture_output=True, check=True)
return retult.stdout
def test_main_error() -> None:
with pytest.raises(SystemExit) as ex:
qnaplxdunpriv.main([])
assert ex.value.code != 0
def _get_owner_group_perms(target_file: str) -> str:
result = subprocess.run(
('getfacl', '-np', target_file), capture_output=True, check=True)
match = _OWNER_GROUP_RE.search(result.stdout.decode())
if not match:
pytest.fail('Failed to get owner group permissions.')
return match.group(1)
| [
"re.compile",
"subprocess.run",
"qnaplxdunpriv.main",
"qnaplxdunpriv.CONTAINER_DEFAULT.lstrip",
"pytest.fail",
"pytest.mark.parametrize",
"qnaplxdunpriv.unset_uids",
"pytest.raises",
"pwd.getpwuid",
"os.stat",
"qnaplxdunpriv.STATION_DEFAULT.lstrip",
"grp.getgrgid",
"qnaplxdunpriv.set_uids"
] | [((311, 362), 're.compile', 're.compile', (['"""^group::([r-][w-][x-])$"""', 're.MULTILINE'], {}), "('^group::([r-][w-][x-])$', re.MULTILINE)\n", (321, 362), False, 'import re\n'), ((367, 473), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode,user_perms,group_perms"""', "[('770', 'r-x', 'rwx'), ('640', 'r--', 'r--')]"], {}), "('mode,user_perms,group_perms', [('770', 'r-x',\n 'rwx'), ('640', 'r--', 'r--')])\n", (390, 473), False, 'import pytest\n'), ((1802, 1908), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode,user_perms,group_perms"""', "[('770', 'r-x', 'rwx'), ('640', 'r--', 'r--')]"], {}), "('mode,user_perms,group_perms', [('770', 'r-x',\n 'rwx'), ('640', 'r--', 'r--')])\n", (1825, 1908), False, 'import pytest\n'), ((2894, 2964), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""user"""', "[[], ['-m', f'user:{_TEST_UID}:r--']]"], {}), "('user', [[], ['-m', f'user:{_TEST_UID}:r--']])\n", (2917, 2964), False, 'import pytest\n'), ((671, 727), 'subprocess.run', 'subprocess.run', (["('chmod', mode, target_file)"], {'check': '(True)'}), "(('chmod', mode, target_file), check=True)\n", (685, 727), False, 'import subprocess\n'), ((732, 759), 'qnaplxdunpriv.set_uids', 'set_uids', (['target_file', 'uids'], {}), '(target_file, uids)\n', (740, 759), False, 'from qnaplxdunpriv import FileAclError, set_uids, unset_uids\n'), ((773, 859), 'subprocess.run', 'subprocess.run', (["('getfacl', '-nps', target_file)"], {'capture_output': '(True)', 'check': '(True)'}), "(('getfacl', '-nps', target_file), capture_output=True, check\n =True)\n", (787, 859), False, 'import subprocess\n'), ((1029, 1058), 'qnaplxdunpriv.unset_uids', 'unset_uids', (['target_file', 'uids'], {}), '(target_file, uids)\n', (1039, 1058), False, 'from qnaplxdunpriv import FileAclError, set_uids, unset_uids\n'), ((1072, 1158), 'subprocess.run', 'subprocess.run', (["('getfacl', '-nps', target_file)"], {'capture_output': '(True)', 'check': '(True)'}), "(('getfacl', '-nps', target_file), capture_output=True, check\n =True)\n", (1086, 1158), False, 'import subprocess\n'), ((1516, 1553), 'qnaplxdunpriv.set_uids', 'set_uids', (['target_symlink', '[_TEST_UID]'], {}), '(target_symlink, [_TEST_UID])\n', (1524, 1553), False, 'from qnaplxdunpriv import FileAclError, set_uids, unset_uids\n'), ((1567, 1655), 'subprocess.run', 'subprocess.run', (["('getfacl', '-nps', target_symlink)"], {'capture_output': '(True)', 'check': '(True)'}), "(('getfacl', '-nps', target_symlink), capture_output=True,\n check=True)\n", (1581, 1655), False, 'import subprocess\n'), ((1758, 1798), 'qnaplxdunpriv.set_uids', 'set_uids', (['target_symlink_ne', '[_TEST_UID]'], {}), '(target_symlink_ne, [_TEST_UID])\n', (1766, 1798), False, 'from qnaplxdunpriv import FileAclError, set_uids, unset_uids\n'), ((2385, 2441), 'subprocess.run', 'subprocess.run', (["('chmod', mode, target_file)"], {'check': '(True)'}), "(('chmod', mode, target_file), check=True)\n", (2399, 2441), False, 'import subprocess\n'), ((2446, 2494), 'qnaplxdunpriv.set_uids', 'set_uids', (['target_file', '[_TEST_UID]'], {'dry_run': '(True)'}), '(target_file, [_TEST_UID], dry_run=True)\n', (2454, 2494), False, 'from qnaplxdunpriv import FileAclError, set_uids, unset_uids\n'), ((2768, 2854), 'subprocess.run', 'subprocess.run', (["('getfacl', '-nps', target_file)"], {'capture_output': '(True)', 'check': '(True)'}), "(('getfacl', '-nps', target_file), capture_output=True, check\n =True)\n", (2782, 2854), False, 'import subprocess\n'), ((3693, 3851), 'subprocess.run', 'subprocess.run', (["('setfacl', '-m', f'user:{_TEST_UID}:r--', '-m',\n f'user:{_TEST_UID * 2}:r--', '-m', f'group:{owner_gid}:r--', target_file)"], {'check': '(True)'}), "(('setfacl', '-m', f'user:{_TEST_UID}:r--', '-m',\n f'user:{_TEST_UID * 2}:r--', '-m', f'group:{owner_gid}:r--',\n target_file), check=True)\n", (3707, 3851), False, 'import subprocess\n'), ((3873, 3909), 'qnaplxdunpriv.unset_uids', 'unset_uids', (['target_file', '[_TEST_UID]'], {}), '(target_file, [_TEST_UID])\n', (3883, 3909), False, 'from qnaplxdunpriv import FileAclError, set_uids, unset_uids\n'), ((3923, 4009), 'subprocess.run', 'subprocess.run', (["('getfacl', '-nps', target_file)"], {'capture_output': '(True)', 'check': '(True)'}), "(('getfacl', '-nps', target_file), capture_output=True, check\n =True)\n", (3937, 4009), False, 'import subprocess\n'), ((4449, 4578), 'subprocess.run', 'subprocess.run', (["('setfacl', '-m', f'user:{_TEST_UID}:r--', '-m',\n f'group:{owner_gid}:{group_perms}', target_file)"], {'check': '(True)'}), "(('setfacl', '-m', f'user:{_TEST_UID}:r--', '-m',\n f'group:{owner_gid}:{group_perms}', target_file), check=True)\n", (4463, 4578), False, 'import subprocess\n'), ((4604, 4640), 'qnaplxdunpriv.unset_uids', 'unset_uids', (['target_file', '[_TEST_UID]'], {}), '(target_file, [_TEST_UID])\n', (4614, 4640), False, 'from qnaplxdunpriv import FileAclError, set_uids, unset_uids\n'), ((4654, 4740), 'subprocess.run', 'subprocess.run', (["('getfacl', '-nps', target_file)"], {'capture_output': '(True)', 'check': '(True)'}), "(('getfacl', '-nps', target_file), capture_output=True, check\n =True)\n", (4668, 4740), False, 'import subprocess\n'), ((4942, 5020), 'subprocess.run', 'subprocess.run', (["('setfacl', '-m', 'default:user::r--', target_dir)"], {'check': '(True)'}), "(('setfacl', '-m', 'default:user::r--', target_dir), check=True)\n", (4956, 5020), False, 'import subprocess\n'), ((5034, 5069), 'qnaplxdunpriv.unset_uids', 'unset_uids', (['target_dir', '[_TEST_UID]'], {}), '(target_dir, [_TEST_UID])\n', (5044, 5069), False, 'from qnaplxdunpriv import FileAclError, set_uids, unset_uids\n'), ((5083, 5168), 'subprocess.run', 'subprocess.run', (["('getfacl', '-nps', target_dir)"], {'capture_output': '(True)', 'check': '(True)'}), "(('getfacl', '-nps', target_dir), capture_output=True, check=True\n )\n", (5097, 5168), False, 'import subprocess\n'), ((5604, 5726), 'subprocess.run', 'subprocess.run', (["('setfacl', '-m', f'user:{_TEST_UID}:r--', '-m', f'group:{owner_gid}:r--',\n target_symlink)"], {'check': '(True)'}), "(('setfacl', '-m', f'user:{_TEST_UID}:r--', '-m',\n f'group:{owner_gid}:r--', target_symlink), check=True)\n", (5618, 5726), False, 'import subprocess\n'), ((5753, 5841), 'subprocess.run', 'subprocess.run', (["('getfacl', '-nps', target_symlink)"], {'capture_output': '(True)', 'check': '(True)'}), "(('getfacl', '-nps', target_symlink), capture_output=True,\n check=True)\n", (5767, 5841), False, 'import subprocess\n'), ((5851, 5890), 'qnaplxdunpriv.unset_uids', 'unset_uids', (['target_symlink', '[_TEST_UID]'], {}), '(target_symlink, [_TEST_UID])\n', (5861, 5890), False, 'from qnaplxdunpriv import FileAclError, set_uids, unset_uids\n'), ((5903, 5991), 'subprocess.run', 'subprocess.run', (["('getfacl', '-nps', target_symlink)"], {'capture_output': '(True)', 'check': '(True)'}), "(('getfacl', '-nps', target_symlink), capture_output=True,\n check=True)\n", (5917, 5991), False, 'import subprocess\n'), ((6105, 6147), 'qnaplxdunpriv.unset_uids', 'unset_uids', (['target_symlink_ne', '[_TEST_UID]'], {}), '(target_symlink_ne, [_TEST_UID])\n', (6115, 6147), False, 'from qnaplxdunpriv import FileAclError, set_uids, unset_uids\n'), ((8564, 8649), 'subprocess.run', 'subprocess.run', (["('getfacl', '-np', target_file)"], {'capture_output': '(True)', 'check': '(True)'}), "(('getfacl', '-np', target_file), capture_output=True, check=True\n )\n", (8578, 8649), False, 'import subprocess\n'), ((639, 659), 'os.stat', 'os.stat', (['target_file'], {}), '(target_file)\n', (646, 659), False, 'import os\n'), ((1313, 1340), 'pytest.raises', 'pytest.raises', (['FileAclError'], {}), '(FileAclError)\n', (1326, 1340), False, 'import pytest\n'), ((1361, 1395), 'qnaplxdunpriv.set_uids', 'set_uids', (['target_file', '[_TEST_UID]'], {}), '(target_file, [_TEST_UID])\n', (1369, 1395), False, 'from qnaplxdunpriv import FileAclError, set_uids, unset_uids\n'), ((2073, 2093), 'os.stat', 'os.stat', (['target_file'], {}), '(target_file)\n', (2080, 2093), False, 'import os\n'), ((3061, 3081), 'os.stat', 'os.stat', (['target_file'], {}), '(target_file)\n', (3068, 3081), False, 'import os\n'), ((3404, 3440), 'qnaplxdunpriv.unset_uids', 'unset_uids', (['target_file', '[_TEST_UID]'], {}), '(target_file, [_TEST_UID])\n', (3414, 3440), False, 'from qnaplxdunpriv import FileAclError, set_uids, unset_uids\n'), ((3458, 3544), 'subprocess.run', 'subprocess.run', (["('getfacl', '-nps', target_file)"], {'capture_output': '(True)', 'check': '(True)'}), "(('getfacl', '-nps', target_file), capture_output=True, check\n =True)\n", (3472, 3544), False, 'import subprocess\n'), ((3661, 3681), 'os.stat', 'os.stat', (['target_file'], {}), '(target_file)\n', (3668, 3681), False, 'import os\n'), ((4292, 4312), 'os.stat', 'os.stat', (['target_file'], {}), '(target_file)\n', (4299, 4312), False, 'import os\n'), ((5350, 5377), 'pytest.raises', 'pytest.raises', (['FileAclError'], {}), '(FileAclError)\n', (5363, 5377), False, 'import pytest\n'), ((5398, 5434), 'qnaplxdunpriv.unset_uids', 'unset_uids', (['target_file', '[_TEST_UID]'], {}), '(target_file, [_TEST_UID])\n', (5408, 5434), False, 'from qnaplxdunpriv import FileAclError, set_uids, unset_uids\n'), ((5569, 5592), 'os.stat', 'os.stat', (['target_symlink'], {}), '(target_symlink)\n', (5576, 5592), False, 'import os\n'), ((6227, 6268), 'qnaplxdunpriv.STATION_DEFAULT.lstrip', 'qnaplxdunpriv.STATION_DEFAULT.lstrip', (['"""/"""'], {}), "('/')\n", (6263, 6268), False, 'import qnaplxdunpriv\n'), ((6512, 6555), 'qnaplxdunpriv.CONTAINER_DEFAULT.lstrip', 'qnaplxdunpriv.CONTAINER_DEFAULT.lstrip', (['"""/"""'], {}), "('/')\n", (6550, 6555), False, 'import qnaplxdunpriv\n'), ((8402, 8427), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (8415, 8427), False, 'import pytest\n'), ((8443, 8465), 'qnaplxdunpriv.main', 'qnaplxdunpriv.main', (['[]'], {}), '([])\n', (8461, 8465), False, 'import qnaplxdunpriv\n'), ((8739, 8792), 'pytest.fail', 'pytest.fail', (['"""Failed to get owner group permissions."""'], {}), "('Failed to get owner group permissions.')\n", (8750, 8792), False, 'import pytest\n'), ((3333, 3395), 'subprocess.run', 'subprocess.run', (["(['setfacl'] + args + [target_file])"], {'check': '(True)'}), "(['setfacl'] + args + [target_file], check=True)\n", (3347, 3395), False, 'import subprocess\n'), ((2130, 2153), 'pwd.getpwuid', 'pwd.getpwuid', (['_TEST_UID'], {}), '(_TEST_UID)\n', (2142, 2153), False, 'import pwd\n'), ((2270, 2293), 'grp.getgrgid', 'grp.getgrgid', (['owner_gid'], {}), '(owner_gid)\n', (2282, 2293), False, 'import grp\n')] |
from functools import lru_cache
import math
import logging
from enum import Enum
from typing import Optional, List, Tuple, Any, Union, Dict, Callable
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO
import requests
import numpy as np
from google.api_core import retry
from PIL import Image
from pyproj import Transformer
from pygeotile.point import Point as PygeoPoint
from pydantic import BaseModel, validator
from pydantic.class_validators import root_validator
from labelbox.data.annotation_types import Rectangle, Point, Line, Polygon
from .base_data import BaseData
from .raster import RasterData
VALID_LAT_RANGE = range(-90, 90)
VALID_LNG_RANGE = range(-180, 180)
DEFAULT_TMS_TILE_SIZE = 256
TILE_DOWNLOAD_CONCURRENCY = 4
logger = logging.getLogger(__name__)
VectorTool = Union[Point, Line, Rectangle, Polygon]
class EPSG(Enum):
""" Provides the EPSG for tiled image assets that are currently supported.
SIMPLEPIXEL is Simple that can be used to obtain the pixel space coordinates
>>> epsg = EPSG()
"""
SIMPLEPIXEL = 1
EPSG4326 = 4326
EPSG3857 = 3857
class TiledBounds(BaseModel):
""" Bounds for a tiled image asset related to the relevant epsg.
Bounds should be Point objects.
>>> bounds = TiledBounds(epsg=EPSG.EPSG4326,
bounds=[
Point(x=-99.21052827588443, y=19.405662413477728),
Point(x=-99.20534818927473, y=19.400498983095076)
])
"""
epsg: EPSG
bounds: List[Point]
@validator('bounds')
def validate_bounds_not_equal(cls, bounds):
first_bound = bounds[0]
second_bound = bounds[1]
if first_bound.x == second_bound.x or \
first_bound.y == second_bound.y:
raise ValueError(
f"Bounds on either axes cannot be equal, currently {bounds}")
return bounds
#validate bounds are within lat,lng range if they are EPSG4326
@root_validator
def validate_bounds_lat_lng(cls, values):
epsg = values.get('epsg')
bounds = values.get('bounds')
if epsg == EPSG.EPSG4326:
for bound in bounds:
lat, lng = bound.y, bound.x
if int(lng) not in VALID_LNG_RANGE or int(
lat) not in VALID_LAT_RANGE:
raise ValueError(f"Invalid lat/lng bounds. Found {bounds}. "
f"lat must be in {VALID_LAT_RANGE}. "
f"lng must be in {VALID_LNG_RANGE}.")
return values
class TileLayer(BaseModel):
""" Url that contains the tile layer. Must be in the format:
https://c.tile.openstreetmap.org/{z}/{x}/{y}.png
>>> layer = TileLayer(
url="https://c.tile.openstreetmap.org/{z}/{x}/{y}.png",
name="slippy map tile"
)
"""
url: str
name: Optional[str] = "default"
def asdict(self) -> Dict[str, str]:
return {"tileLayerUrl": self.url, "name": self.name}
@validator('url')
def validate_url(cls, url):
xyz_format = "/{z}/{x}/{y}"
if xyz_format not in url:
raise ValueError(f"{url} needs to contain {xyz_format}")
return url
class TiledImageData(BaseData):
""" Represents tiled imagery
If specified version is 2, converts bounds from [lng,lat] to [lat,lng]
Requires the following args:
tile_layer: TileLayer
tile_bounds: TiledBounds
zoom_levels: List[int]
Optional args:
max_native_zoom: int = None
tile_size: Optional[int]
version: int = 2
alternative_layers: List[TileLayer]
>>> tiled_image_data = TiledImageData(tile_layer=TileLayer,
tile_bounds=TiledBounds,
zoom_levels=[1, 12])
"""
tile_layer: TileLayer
tile_bounds: TiledBounds
alternative_layers: List[TileLayer] = []
zoom_levels: Tuple[int, int]
max_native_zoom: Optional[int] = None
tile_size: Optional[int] = DEFAULT_TMS_TILE_SIZE
version: Optional[int] = 2
multithread: bool = True
def __post_init__(self) -> None:
if self.max_native_zoom is None:
self.max_native_zoom = self.zoom_levels[0]
def asdict(self) -> Dict[str, str]:
return {
"tileLayerUrl": self.tile_layer.url,
"bounds": [[
self.tile_bounds.bounds[0].x, self.tile_bounds.bounds[0].y
], [self.tile_bounds.bounds[1].x, self.tile_bounds.bounds[1].y]],
"minZoom": self.zoom_levels[0],
"maxZoom": self.zoom_levels[1],
"maxNativeZoom": self.max_native_zoom,
"epsg": self.tile_bounds.epsg.name,
"tileSize": self.tile_size,
"alternativeLayers": [
layer.asdict() for layer in self.alternative_layers
],
"version": self.version
}
def raster_data(self,
zoom: int = 0,
max_tiles: int = 32,
multithread=True) -> RasterData:
"""Converts the tiled image asset into a RasterData object containing an
np.ndarray.
Uses the minimum zoom provided to render the image.
"""
if self.tile_bounds.epsg == EPSG.SIMPLEPIXEL:
xstart, ystart, xend, yend = self._get_simple_image_params(zoom)
elif self.tile_bounds.epsg == EPSG.EPSG4326:
xstart, ystart, xend, yend = self._get_3857_image_params(
zoom, self.tile_bounds)
elif self.tile_bounds.epsg == EPSG.EPSG3857:
#transform to 4326
transformer = EPSGTransformer.create_geo_to_geo_transformer(
EPSG.EPSG3857, EPSG.EPSG4326)
transforming_bounds = [
transformer(self.tile_bounds.bounds[0]),
transformer(self.tile_bounds.bounds[1])
]
xstart, ystart, xend, yend = self._get_3857_image_params(
zoom, transforming_bounds)
else:
raise ValueError(f"Unsupported epsg found: {self.tile_bounds.epsg}")
self._validate_num_tiles(xstart, ystart, xend, yend, max_tiles)
rounded_tiles, pixel_offsets = list(
zip(*[
self._tile_to_pixel(pt) for pt in [xstart, ystart, xend, yend]
]))
image = self._fetch_image_for_bounds(*rounded_tiles, zoom, multithread)
arr = self._crop_to_bounds(image, *pixel_offsets)
return RasterData(arr=arr)
@property
def value(self) -> np.ndarray:
"""Returns the value of a generated RasterData object.
"""
return self.raster_data(self.zoom_levels[0],
multithread=self.multithread).value
def _get_simple_image_params(self,
zoom) -> Tuple[float, float, float, float]:
"""Computes the x and y tile bounds for fetching an image that
captures the entire labeling region (TiledData.bounds) given a specific zoom
Simple has different order of x / y than lat / lng because of how leaflet behaves
leaflet reports all points as pixel locations at a zoom of 0
"""
xend, xstart, yend, ystart = (
self.tile_bounds.bounds[1].x,
self.tile_bounds.bounds[0].x,
self.tile_bounds.bounds[1].y,
self.tile_bounds.bounds[0].y,
)
return (*[
x * (2**(zoom)) / self.tile_size
for x in [xstart, ystart, xend, yend]
],)
def _get_3857_image_params(
self, zoom: int,
bounds: TiledBounds) -> Tuple[float, float, float, float]:
"""Computes the x and y tile bounds for fetching an image that
captures the entire labeling region (TiledData.bounds) given a specific zoom
"""
lat_start, lat_end = bounds.bounds[1].y, bounds.bounds[0].y
lng_start, lng_end = bounds.bounds[1].x, bounds.bounds[0].x
# Convert to zoom 0 tile coordinates
xstart, ystart = self._latlng_to_tile(lat_start, lng_start)
xend, yend = self._latlng_to_tile(lat_end, lng_end)
# Make sure that the tiles are increasing in order
xstart, xend = min(xstart, xend), max(xstart, xend)
ystart, yend = min(ystart, yend), max(ystart, yend)
return (*[pt * 2.0**zoom for pt in [xstart, ystart, xend, yend]],)
def _latlng_to_tile(self,
lat: float,
lng: float,
zoom=0) -> Tuple[float, float]:
"""Converts lat/lng to 3857 tile coordinates
Formula found here:
https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#lon.2Flat_to_tile_numbers_2
"""
scale = 2**zoom
lat_rad = math.radians(lat)
x = (lng + 180.0) / 360.0 * scale
y = (1.0 - math.asinh(math.tan(lat_rad)) / math.pi) / 2.0 * scale
return x, y
def _tile_to_pixel(self, tile: float) -> Tuple[int, int]:
"""Rounds a tile coordinate and reports the remainder in pixels
"""
rounded_tile = int(tile)
remainder = tile - rounded_tile
pixel_offset = int(self.tile_size * remainder)
return rounded_tile, pixel_offset
def _fetch_image_for_bounds(self,
x_tile_start: int,
y_tile_start: int,
x_tile_end: int,
y_tile_end: int,
zoom: int,
multithread=True) -> np.ndarray:
"""Fetches the tiles and combines them into a single image.
If a tile cannot be fetched, a padding of expected tile size is instead added.
"""
if multithread:
tiles = {}
with ThreadPoolExecutor(
max_workers=TILE_DOWNLOAD_CONCURRENCY) as exc:
for x in range(x_tile_start, x_tile_end + 1):
for y in range(y_tile_start, y_tile_end + 1):
tiles[(x, y)] = exc.submit(self._fetch_tile, x, y, zoom)
rows = []
for y in range(y_tile_start, y_tile_end + 1):
row = []
for x in range(x_tile_start, x_tile_end + 1):
try:
if multithread:
row.append(tiles[(x, y)].result())
else:
row.append(self._fetch_tile(x, y, zoom))
except:
row.append(
np.zeros(shape=(self.tile_size, self.tile_size, 3),
dtype=np.uint8))
rows.append(np.hstack(row))
return np.vstack(rows)
@retry.Retry(initial=1, maximum=16, multiplier=2)
def _fetch_tile(self, x: int, y: int, z: int) -> np.ndarray:
"""
Fetches the image and returns an np array.
"""
data = requests.get(self.tile_layer.url.format(x=x, y=y, z=z))
data.raise_for_status()
decoded = np.array(Image.open(BytesIO(data.content)))[..., :3]
if decoded.shape[:2] != (self.tile_size, self.tile_size):
logger.warning(f"Unexpected tile size {decoded.shape}.")
return decoded
def _crop_to_bounds(
self,
image: np.ndarray,
x_px_start: int,
y_px_start: int,
x_px_end: int,
y_px_end: int,
) -> np.ndarray:
"""This function slices off the excess pixels that are outside of the bounds.
This occurs because only full tiles can be downloaded at a time.
"""
def invert_point(pt):
# Must have at least 1 pixel for stability.
pt = max(pt, 1)
# All pixel points are relative to a single tile
# So subtracting the tile size inverts the axis
pt = pt - self.tile_size
return pt if pt != 0 else None
x_px_end, y_px_end = invert_point(x_px_end), invert_point(y_px_end)
return image[y_px_start:y_px_end, x_px_start:x_px_end, :]
def _validate_num_tiles(self, xstart: float, ystart: float, xend: float,
yend: float, max_tiles: int):
"""Calculates the number of expected tiles we would fetch.
If this is greater than the number of max tiles, raise an error.
"""
total_n_tiles = (yend - ystart + 1) * (xend - xstart + 1)
if total_n_tiles > max_tiles:
raise ValueError(f"Requested zoom results in {total_n_tiles} tiles."
f"Max allowed tiles are {max_tiles}"
f"Increase max tiles or reduce zoom level.")
@validator('zoom_levels')
def validate_zoom_levels(cls, zoom_levels):
if zoom_levels[0] > zoom_levels[1]:
raise ValueError(
f"Order of zoom levels should be min, max. Received {zoom_levels}"
)
return zoom_levels
class EPSGTransformer(BaseModel):
"""Transformer class between different EPSG's. Useful when wanting to project
in different formats.
"""
class Config:
arbitrary_types_allowed = True
transformer: Any
@staticmethod
def _is_simple(epsg: EPSG) -> bool:
return epsg == EPSG.SIMPLEPIXEL
@staticmethod
def _get_ranges(bounds: np.ndarray) -> Tuple[int, int]:
"""helper function to get the range between bounds.
returns a tuple (x_range, y_range)"""
x_range = np.max(bounds[:, 0]) - np.min(bounds[:, 0])
y_range = np.max(bounds[:, 1]) - np.min(bounds[:, 1])
return (x_range, y_range)
@staticmethod
def _min_max_x_y(bounds: np.ndarray) -> Tuple[int, int, int, int]:
"""returns the min x, max x, min y, max y of a numpy array
"""
return np.min(bounds[:, 0]), np.max(bounds[:, 0]), np.min(
bounds[:, 1]), np.max(bounds[:, 1])
@classmethod
def geo_and_pixel(cls,
src_epsg,
pixel_bounds: TiledBounds,
geo_bounds: TiledBounds,
zoom=0) -> Callable:
"""method to change from one projection to simple projection"""
pixel_bounds = pixel_bounds.bounds
geo_bounds_epsg = geo_bounds.epsg
geo_bounds = geo_bounds.bounds
local_bounds = np.array([(point.x, point.y) for point in pixel_bounds],
dtype=int)
#convert geo bounds to pixel bounds. assumes geo bounds are in wgs84/EPS4326 per leaflet
global_bounds = np.array([
PygeoPoint.from_latitude_longitude(latitude=point.y,
longitude=point.x).pixels(zoom)
for point in geo_bounds
])
#get the range of pixels for both sets of bounds to use as a multiplification factor
local_x_range, local_y_range = cls._get_ranges(bounds=local_bounds)
global_x_range, global_y_range = cls._get_ranges(bounds=global_bounds)
if src_epsg == EPSG.SIMPLEPIXEL:
def transform(x: int, y: int) -> Callable[[int, int], Transformer]:
scaled_xy = (x * (global_x_range) / (local_x_range),
y * (global_y_range) / (local_y_range))
minx, _, miny, _ = cls._min_max_x_y(bounds=global_bounds)
x, y = map(lambda i, j: i + j, scaled_xy, (minx, miny))
point = PygeoPoint.from_pixel(pixel_x=x, pixel_y=y,
zoom=zoom).latitude_longitude
#convert to the desired epsg
return Transformer.from_crs(EPSG.EPSG4326.value,
geo_bounds_epsg.value,
always_xy=True).transform(
point[1], point[0])
return transform
#handles 4326 from lat,lng
elif src_epsg == EPSG.EPSG4326:
def transform(x: int, y: int) -> Callable[[int, int], Transformer]:
point_in_px = PygeoPoint.from_latitude_longitude(
latitude=y, longitude=x).pixels(zoom)
minx, _, miny, _ = cls._min_max_x_y(global_bounds)
x, y = map(lambda i, j: i - j, point_in_px, (minx, miny))
return (x * (local_x_range) / (global_x_range),
y * (local_y_range) / (global_y_range))
return transform
#handles 3857 from meters
elif src_epsg == EPSG.EPSG3857:
def transform(x: int, y: int) -> Callable[[int, int], Transformer]:
point_in_px = PygeoPoint.from_meters(meter_y=y,
meter_x=x).pixels(zoom)
minx, _, miny, _ = cls._min_max_x_y(global_bounds)
x, y = map(lambda i, j: i - j, point_in_px, (minx, miny))
return (x * (local_x_range) / (global_x_range),
y * (local_y_range) / (global_y_range))
return transform
@classmethod
def create_geo_to_geo_transformer(
cls, src_epsg: EPSG,
tgt_epsg: EPSG) -> Callable[[int, int], Transformer]:
"""method to change from one projection to another projection.
supports EPSG transformations not Simple.
"""
if cls._is_simple(epsg=src_epsg) or cls._is_simple(epsg=tgt_epsg):
raise Exception(
f"Cannot be used for Simple transformations. Found {src_epsg} and {tgt_epsg}"
)
return EPSGTransformer(transformer=Transformer.from_crs(
src_epsg.value, tgt_epsg.value, always_xy=True).transform)
@classmethod
def create_geo_to_pixel_transformer(
cls,
src_epsg,
pixel_bounds: TiledBounds,
geo_bounds: TiledBounds,
zoom=0) -> Callable[[int, int], Transformer]:
"""method to change from a geo projection to Simple"""
transform_function = cls.geo_and_pixel(src_epsg=src_epsg,
pixel_bounds=pixel_bounds,
geo_bounds=geo_bounds,
zoom=zoom)
return EPSGTransformer(transformer=transform_function)
@classmethod
def create_pixel_to_geo_transformer(
cls,
src_epsg,
pixel_bounds: TiledBounds,
geo_bounds: TiledBounds,
zoom=0) -> Callable[[int, int], Transformer]:
"""method to change from a geo projection to Simple"""
transform_function = cls.geo_and_pixel(src_epsg=src_epsg,
pixel_bounds=pixel_bounds,
geo_bounds=geo_bounds,
zoom=zoom)
return EPSGTransformer(transformer=transform_function)
def _get_point_obj(self, point) -> Point:
point = self.transformer(point.x, point.y)
return Point(x=point[0], y=point[1])
def __call__(
self, shape: Union[Point, Line, Rectangle, Polygon]
) -> Union[VectorTool, List[VectorTool]]:
if isinstance(shape, list):
return [self(geom) for geom in shape]
if isinstance(shape, Point):
return self._get_point_obj(shape)
if isinstance(shape, Line):
return Line(points=[self._get_point_obj(p) for p in shape.points])
if isinstance(shape, Polygon):
return Polygon(
points=[self._get_point_obj(p) for p in shape.points])
if isinstance(shape, Rectangle):
return Rectangle(start=self._get_point_obj(shape.start),
end=self._get_point_obj(shape.end))
else:
raise ValueError(f"Unsupported type found: {type(shape)}") | [
"logging.getLogger",
"pygeotile.point.Point.from_latitude_longitude",
"pygeotile.point.Point.from_pixel",
"math.tan",
"pydantic.validator",
"numpy.hstack",
"concurrent.futures.ThreadPoolExecutor",
"pygeotile.point.Point.from_meters",
"io.BytesIO",
"math.radians",
"numpy.max",
"numpy.array",
... | [((765, 792), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (782, 792), False, 'import logging\n'), ((1532, 1551), 'pydantic.validator', 'validator', (['"""bounds"""'], {}), "('bounds')\n", (1541, 1551), False, 'from pydantic import BaseModel, validator\n'), ((3019, 3035), 'pydantic.validator', 'validator', (['"""url"""'], {}), "('url')\n", (3028, 3035), False, 'from pydantic import BaseModel, validator\n'), ((10812, 10860), 'google.api_core.retry.Retry', 'retry.Retry', ([], {'initial': '(1)', 'maximum': '(16)', 'multiplier': '(2)'}), '(initial=1, maximum=16, multiplier=2)\n', (10823, 10860), False, 'from google.api_core import retry\n'), ((12767, 12791), 'pydantic.validator', 'validator', (['"""zoom_levels"""'], {}), "('zoom_levels')\n", (12776, 12791), False, 'from pydantic import BaseModel, validator\n'), ((8837, 8854), 'math.radians', 'math.radians', (['lat'], {}), '(lat)\n', (8849, 8854), False, 'import math\n'), ((10790, 10805), 'numpy.vstack', 'np.vstack', (['rows'], {}), '(rows)\n', (10799, 10805), True, 'import numpy as np\n'), ((14442, 14509), 'numpy.array', 'np.array', (['[(point.x, point.y) for point in pixel_bounds]'], {'dtype': 'int'}), '([(point.x, point.y) for point in pixel_bounds], dtype=int)\n', (14450, 14509), True, 'import numpy as np\n'), ((19199, 19228), 'labelbox.data.annotation_types.Point', 'Point', ([], {'x': 'point[0]', 'y': 'point[1]'}), '(x=point[0], y=point[1])\n', (19204, 19228), False, 'from labelbox.data.annotation_types import Rectangle, Point, Line, Polygon\n'), ((13581, 13601), 'numpy.max', 'np.max', (['bounds[:, 0]'], {}), '(bounds[:, 0])\n', (13587, 13601), True, 'import numpy as np\n'), ((13604, 13624), 'numpy.min', 'np.min', (['bounds[:, 0]'], {}), '(bounds[:, 0])\n', (13610, 13624), True, 'import numpy as np\n'), ((13643, 13663), 'numpy.max', 'np.max', (['bounds[:, 1]'], {}), '(bounds[:, 1])\n', (13649, 13663), True, 'import numpy as np\n'), ((13666, 13686), 'numpy.min', 'np.min', (['bounds[:, 1]'], {}), '(bounds[:, 1])\n', (13672, 13686), True, 'import numpy as np\n'), ((13905, 13925), 'numpy.min', 'np.min', (['bounds[:, 0]'], {}), '(bounds[:, 0])\n', (13911, 13925), True, 'import numpy as np\n'), ((13927, 13947), 'numpy.max', 'np.max', (['bounds[:, 0]'], {}), '(bounds[:, 0])\n', (13933, 13947), True, 'import numpy as np\n'), ((13949, 13969), 'numpy.min', 'np.min', (['bounds[:, 1]'], {}), '(bounds[:, 1])\n', (13955, 13969), True, 'import numpy as np\n'), ((13984, 14004), 'numpy.max', 'np.max', (['bounds[:, 1]'], {}), '(bounds[:, 1])\n', (13990, 14004), True, 'import numpy as np\n'), ((9897, 9954), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'TILE_DOWNLOAD_CONCURRENCY'}), '(max_workers=TILE_DOWNLOAD_CONCURRENCY)\n', (9915, 9954), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((10758, 10772), 'numpy.hstack', 'np.hstack', (['row'], {}), '(row)\n', (10767, 10772), True, 'import numpy as np\n'), ((11142, 11163), 'io.BytesIO', 'BytesIO', (['data.content'], {}), '(data.content)\n', (11149, 11163), False, 'from io import BytesIO\n'), ((15547, 15601), 'pygeotile.point.Point.from_pixel', 'PygeoPoint.from_pixel', ([], {'pixel_x': 'x', 'pixel_y': 'y', 'zoom': 'zoom'}), '(pixel_x=x, pixel_y=y, zoom=zoom)\n', (15568, 15601), True, 'from pygeotile.point import Point as PygeoPoint\n'), ((17740, 17808), 'pyproj.Transformer.from_crs', 'Transformer.from_crs', (['src_epsg.value', 'tgt_epsg.value'], {'always_xy': '(True)'}), '(src_epsg.value, tgt_epsg.value, always_xy=True)\n', (17760, 17808), False, 'from pyproj import Transformer\n'), ((14686, 14757), 'pygeotile.point.Point.from_latitude_longitude', 'PygeoPoint.from_latitude_longitude', ([], {'latitude': 'point.y', 'longitude': 'point.x'}), '(latitude=point.y, longitude=point.x)\n', (14720, 14757), True, 'from pygeotile.point import Point as PygeoPoint\n'), ((15735, 15820), 'pyproj.Transformer.from_crs', 'Transformer.from_crs', (['EPSG.EPSG4326.value', 'geo_bounds_epsg.value'], {'always_xy': '(True)'}), '(EPSG.EPSG4326.value, geo_bounds_epsg.value, always_xy=True\n )\n', (15755, 15820), False, 'from pyproj import Transformer\n'), ((8927, 8944), 'math.tan', 'math.tan', (['lat_rad'], {}), '(lat_rad)\n', (8935, 8944), False, 'import math\n'), ((10632, 10699), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.tile_size, self.tile_size, 3)', 'dtype': 'np.uint8'}), '(shape=(self.tile_size, self.tile_size, 3), dtype=np.uint8)\n', (10640, 10699), True, 'import numpy as np\n'), ((16200, 16259), 'pygeotile.point.Point.from_latitude_longitude', 'PygeoPoint.from_latitude_longitude', ([], {'latitude': 'y', 'longitude': 'x'}), '(latitude=y, longitude=x)\n', (16234, 16259), True, 'from pygeotile.point import Point as PygeoPoint\n'), ((16781, 16825), 'pygeotile.point.Point.from_meters', 'PygeoPoint.from_meters', ([], {'meter_y': 'y', 'meter_x': 'x'}), '(meter_y=y, meter_x=x)\n', (16803, 16825), True, 'from pygeotile.point import Point as PygeoPoint\n')] |
import unittest
import time
import httpagentparser
detect = httpagentparser.detect
simple_detect = httpagentparser.simple_detect
data = (
# tuple of tuples
# tuple (agent-string, expected result of simple_detect, expected result of detect)
("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3861.0 Safari/537.36 Edg/77.0.230.2",
('Windows 10', 'ChromiumEdge 172.16.17.32'),
{'bot': False, 'os': {'version': '10', 'name': 'Windows'}, 'browser': {'version': '172.16.17.32', 'name': 'ChromiumEdge'}},),
("Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-GB; rv:1.9.0.10) Gecko/2009042315 Firefox/3.0.10",
('MacOS Macintosh X 10.5', 'Firefox 3.0.10'),
{'bot': False, 'flavor': {'version': 'X 10.5', 'name': 'MacOS'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '3.0.10', 'name': 'Firefox'}},),
("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_6) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.3 Safari/534.24,gzip(gfe)",
('MacOS Macintosh X 10.6.6', 'Chrome 11.0.696.3'),
{'bot': False, 'flavor': {'version': 'X 10.6.6', 'name': 'MacOS'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '11.0.696.3', 'name': 'Chrome'}},),
("Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2) Gecko/20100308 Ubuntu/10.04 (lucid) Firefox/3.6 GTB7.1",
('Ubuntu Linux 10.04', 'Firefox 3.6'),
{'bot': False, 'dist': {'version': '10.04', 'name': 'Ubuntu'}, 'os': {'name': 'Linux'}, 'browser': {'version': '3.6', 'name': 'Firefox'}},),
("Mozilla/5.0 (Linux; U; Android 2.2.1; fr-ch; A43 Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
('Android Linux 2.2.1', 'AndroidBrowser'),
{'bot': False, 'dist': {'version': '2.2.1', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'name': 'AndroidBrowser'}},),
("Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3",
('iPhone iOS', 'Safari 3.0'),
{'bot': False, 'os': {'name': 'iOS'}, 'dist': {'name': 'iPhone'}, 'browser': {'version': '3.0', 'name': 'Safari'}},),
("Mozilla/5.0 (X11; CrOS i686 0.0.0) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.27 Safari/534.24,gzip(gfe)",
('ChromeOS 0.0.0', 'Chrome 11.0.696.27'),
{'bot': False, 'os': {'name': 'ChromeOS', 'version': '0.0.0'}, 'browser': {'name': 'Chrome', 'version': '11.0.696.27'}},),
("Mozilla/4.0 (compatible; MSIE 6.0; MSIE 5.5; Windows NT 5.1) Opera 7.02 [en]",
('Windows XP', 'Opera 7.02'),
{'bot': False, 'os': {'name': 'Windows', 'version': 'XP'}, 'browser': {'name': 'Opera', 'version': '7.02'}},),
("Opera/9.64(Windows NT 5.1; U; en) Presto/2.1.1",
('Windows XP', 'Opera 9.64'),
{'bot': False, 'os': {'name': 'Windows', 'version': 'XP'}, 'browser': {'name': 'Opera', 'version': '9.64'}},),
("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
('Windows 7', 'Microsoft Internet Explorer 10.0'),
{'bot': False, 'os': {'version': '7', 'name': 'Windows'}, 'browser': {'version': '10.0', 'name': 'Microsoft Internet Explorer'}},),
("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; yie8)",
('Windows 7', 'Microsoft Internet Explorer 9.0'),
{'bot': False, 'os': {'version': '7', 'name': 'Windows'}, 'browser': {'version': '9.0', 'name': 'Microsoft Internet Explorer'}},),
("Mozilla/5.0 (MSIE 7.0; Macintosh; U; SunOS; X11; gu; SV1; InfoPath.2; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648",
('Macintosh', 'Microsoft Internet Explorer 7.0'),
{'bot': False, 'os': {'name': 'Macintosh'}, 'browser': {'version': '7.0', 'name': 'Microsoft Internet Explorer'}}),
("Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; GTB6.5; QQDownload 534; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; SLCC2; .NET CLR 2.0.50727; Media Center PC 6.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729)",
('Windows XP', 'Microsoft Internet Explorer 8.0'),
{'bot': False, 'os': {'version': 'XP', 'name': 'Windows'}, 'browser': {'version': '8.0', 'name': 'Microsoft Internet Explorer'}}),
('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; SLCC1; .NET CLR 2.0.50727; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618; .NET4.0C)',
('Windows XP', 'Microsoft Internet Explorer 8.0'),
{'bot': False, 'os': {'version': 'XP', 'name': 'Windows'}, 'browser': {'version': '8.0', 'name': 'Microsoft Internet Explorer'}},),
("Opera/9.80 (X11; Linux i686; U; en) Presto/2.9.168 Version/11.50",
("Linux", "Opera 11.50"),
{'bot': False, "os": {"name": "Linux"}, "browser": {"name": "Opera", "version": "11.50"}},),
("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5) Gecko/20060127 Netscape/8.1",
("Windows XP", "Netscape 8.1"),
{'bot': False, 'os': {'name': 'Windows', 'version': 'XP'}, 'browser': {'name': 'Netscape', 'version': '8.1'}},),
("Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.2; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/234.40.1 Safari/534.6 TouchPad/1.0",
("WebOS Linux 3.0.2", "WOSBrowser"),
{'bot': False, 'dist': {'name': 'WebOS', 'version': '3.0.2'}, 'os' : {'name' : 'Linux'}, 'browser': {'name': 'WOSBrowser'}},),
("Mozilla/5.0 (iPad; CPU OS 5_0_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A405 Safari/7534.48.3",
('IPad iOS 5.0.1', 'Safari 5.1'),
{'bot': False, 'os': {'name': 'iOS'}, 'dist': {'version': '5.0.1', 'name': 'IPad'}, 'browser': {'version': '5.1', 'name': 'Safari'}},),
("AppleCoreMedia/1.0.0.10B329 (iPad; U; CPU OS 6_1_3 like Mac OS X; en_us)",
('IPad iOS 6.1.3', 'Unknown Browser'),
{'bot': False, 'dist': {'name': 'IPad', 'version': '6.1.3'}, 'os': {'name': 'iOS'}},),
("Mozilla/5.0 (iPad; CPU OS 7_1 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D167 Safari/9537.53",
('IPad iOS 7.1', 'Safari 7.0'),
{'bot': False, 'browser': {'name': 'Safari', 'version': '7.0'}, 'dist': {'name': 'IPad', 'version': '7.1'}, 'os': {'name': 'iOS'}}),
("Mozilla/5.0 (Linux; U; Android 3.2.1; en-gb; Transformer TF101 Build/HTK75) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
('Android Linux 3.2.1', 'AndroidBrowser'),
{'bot': False, 'dist': {'version': '3.2.1', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'name': 'AndroidBrowser'}},),
("Mozilla/5.0 (BlackBerry; U; BlackBerry 9700; en-US) AppleWebKit/534.8+ (KHTML, like Gecko) Version/6.0.0.448 Mobile Safari/534.8+",
('Blackberry', 'Safari 6.0.0.448'),
{'bot': False, 'os': {'name': 'Blackberry'}, 'browser': {'version': '6.0.0.448', 'name': 'Safari'}},),
("Mozilla/5.0 (PlayBook; U; RIM Tablet OS 1.0.0; en-US) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.7 Safari/534.11+",
('BlackberryPlaybook', 'Safari 7.1.0.7'),
{'bot': False, 'dist': {'name': 'BlackberryPlaybook'}, 'browser': {'version': '7.1.0.7', 'name': 'Safari'}},),
("Opera/9.80 (Android 2.3.5; Linux; Opera Mobi/build-1203300859; U; en) Presto/2.10.254 Version/12.00",
('Android Linux 2.3.5', 'Opera Mobile 12.00'),
{'bot': False, 'dist': {'version': '2.3.5', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'version': '12.00', 'name': 'Opera Mobile'}},),
("Mozilla/5.0 (Linux; U; Android 2.3.5; en-in; HTC_DesireS_S510e Build/GRJ90) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
('Android Linux 2.3.5', 'AndroidBrowser'),
{'bot': False, 'dist': {'version': '2.3.5', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'name': 'AndroidBrowser'}},),
("Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; es-es) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3",
('iPhone iOS 5.1.1', 'ChromeiOS 19.0.1084.60'),
{'bot': False, 'os': {'name': 'iOS'}, 'dist': {'version': '5.1.1', 'name': 'iPhone'}, 'browser': {'version': '19.0.1084.60', 'name': 'ChromeiOS'}}),
("Mozilla/5.0 (X11; Linux x86_64; rv:7.0.1) Gecko/20111011 Firefox/7.0.1 SeaMonkey/2.4.1",
("Linux", "SeaMonkey 2.4.1"),
{'bot': False, "os" : {"name": "Linux"}, "browser": {"name": "SeaMonkey", "version": "2.4.1"}}),
("Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
("Ubuntu Linux", "Firefox 16.0"),
{'bot': False, 'dist': {'name': 'Ubuntu'}, 'os': {'name': 'Linux'}, 'browser': {'version': '16.0', 'name': 'Firefox'}},),
("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.1 Safari/537.17",
("Linux", "Chrome 24.0.1312.1"),
{'bot': False, "os" : {"name": "Linux"}, "browser": {"name": "Chrome", "version": "24.0.1312.1"}}),
("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.19 (KHTML, like Gecko) Chrome/25.0.1323.1 Safari/537.19",
("MacOS Macintosh X 10.8.2", "Chrome 25.0.1323.1"),
{'bot': False, 'flavor': {'name': 'MacOS', 'version': 'X 10.8.2'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '25.0.1323.1', 'name': 'Chrome'}},),
("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/536.26.14 (KHTML, like Gecko) Version/6.0.1 Safari/536.26.14",
("MacOS Macintosh X 10.8.2", "Safari 6.0.1"),
{'bot': False, 'flavor': {'name': 'MacOS', 'version': 'X 10.8.2'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '6.0.1', 'name': 'Safari'}},),
("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
("Windows 7", "Chrome 23.0.1271.64"),
{'bot': False, 'os': {'name': 'Windows', 'version': '7'}, 'browser': {'version': '23.0.1271.64', 'name': 'Chrome'}},),
("Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)",
("Windows XP", "Microsoft Internet Explorer 8.0"),
{'bot': False, 'os': {'name': 'Windows', 'version': 'XP'}, 'browser': {'version': '8.0', 'name': 'Microsoft Internet Explorer'}},),
("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)",
("Windows 7", "Microsoft Internet Explorer 9.0"),
{'bot': False, 'os': {'name': 'Windows', 'version': '7'}, 'browser': {'version': '9.0', 'name': 'Microsoft Internet Explorer'}},),
("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20100101 Firefox/15.0.1",
("Windows 7", "Firefox 15.0.1"),
{'bot': False, 'os': {'name': 'Windows', 'version': '7'}, 'browser': {'version': '15.0.1', 'name': 'Firefox'}},),
("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
("Windows 7", "Safari 5.1.7"),
{'bot': False, 'os': {'name': 'Windows', 'version': '7'}, 'browser': {'version': '5.1.7', 'name': 'Safari'}},),
("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36 OPR/17.0.1241.53",
("Windows 7", "Opera 17.0.1241.53"),
{'bot': False, 'os': {'name': 'Windows', 'version': '7'}, 'browser': {'version': '17.0.1241.53', 'name': 'Opera'}},),
('Mozilla/5.0+(X11;+CrOS+i686+2465.163.0)+AppleWebKit/537.1+(KHTML,+like+Gecko)+Chrome/21.0.1180.91+Safari/537.1',
('ChromeOS 2465.163.0', 'Chrome 21.0.1180.91'),
{'bot': False, 'os': {'version': '2465.163.0', 'name': 'ChromeOS'}, 'browser': {'version': '21.0.1180.91', 'name': 'Chrome'}},),
('Mozilla/5.0 (Linux; U; en-us; KFOT Build/IML74K) AppleWebKit/535.19 (KHTML, like Gecko) Silk/2.2 Safari/535.19 Silk-Accelerated=true',
('Linux', 'Safari 535.19'),
{'bot': False, 'os': {'name': 'Linux'}, 'browser': {'version': '535.19', 'name': 'Safari'}}),
('Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko',
('Windows 8.1', 'Microsoft Internet Explorer 11.0'),
{'bot': False, 'os': {'name': 'Windows', 'version': '8.1'}, 'browser': {'version': '11.0', 'name': 'Microsoft Internet Explorer'}},),
('Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
('Unknown OS', 'GoogleBot 2.1'),
{'bot': True, 'browser': {'name': 'GoogleBot', 'version': '2.1'}},),
('"Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"',
('Unknown OS', 'BingBot 2.0'),
{'bot': True, 'browser': {'name': 'BingBot', 'version': '2.0'}}),
('Mozilla/5.0 (compatible; YandexBot/3.0)',
('Unknown OS', 'YandexBot 3.0'),
{'bot': True, 'browser': {'name': 'YandexBot', 'version': '3.0'}}),
('Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
('Unknown OS', 'BaiduBot 2.0'),
{'bot': True, 'browser': {'name': 'BaiduBot', 'version': '2.0'}}),
('Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Radar 4G)',
('Windows Phone 7.5', 'Microsoft Internet Explorer 9.0'),
{'bot': False, 'browser': {'name': 'Microsoft Internet Explorer', 'version': '9.0'}, 'os': {'name': 'Windows Phone', 'version': '7.5'}}),
('Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; IEMobile/7.0; SAMSUNG; GT-i8700)',
('Windows Phone 7.0', 'Microsoft Internet Explorer 7.0'),
{'bot': False, 'browser': {'name': 'Microsoft Internet Explorer', 'version': '7.0'}, 'os': {'name': 'Windows Phone', 'version': '7.0'}}),
('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; HTC_HD2_T8585; Windows Phone 6.5)',
('Windows Phone 6.5', 'Microsoft Internet Explorer 6.0'),
{'bot': False, 'browser': {'name': 'Microsoft Internet Explorer', 'version': '6.0'}, 'os': {'name': 'Windows Phone', 'version': '6.5'}}),
('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; HTC_HD2_T8585; Windows Phone 6.5)',
('Windows Phone 6.5', 'Microsoft Internet Explorer 6.0'),
{'bot': False, 'browser': {'name': 'Microsoft Internet Explorer', 'version': '6.0'}, 'os': {'name': 'Windows Phone', 'version': '6.5'}}),
('Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20110814 Firefox/6.0 Google (+https://developers.google.com/+/web/snippet/)',
('Windows 7', 'GoogleBot'),
{'bot': True, 'browser': {'name': 'GoogleBot'}, 'os': {'name': 'Windows', 'version': '7'}}),
('facebookexternalhit/1.1 (+http://www.facebook.com/externalhit_uatext.php)',
('Unknown OS', 'FacebookExternalHit 1.1'),
{'bot': True, 'browser': {'name': 'FacebookExternalHit', 'version': '1.1'},}),
('runscope-radar/2.0',
('Unknown OS', 'RunscopeRadar'),
{'bot': True, 'browser': {'name': 'RunscopeRadar'}}),
('Mozilla/5.0 (Mobile; Windows Phone 8.1; Android 4.0; ARM; Trident/7.0; Touch; rv:11.0; IEMobile/11.0; NOKIA; Lumia 720) like iPhone OS 7_0_3 Mac OS X AppleWebKit/537 (KHTML, like Gecko) Mobile Safari/537',
('Windows Phone 8.1', 'Microsoft Internet Explorer 11.0'),
{'os': {'version': '8.1', 'name': 'Windows Phone'}, 'bot': False, 'browser': {'version': '11.0', 'name': 'Microsoft Internet Explorer'}}),
('5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 YaBrowser/16.2.0.1818 (beta) Safari/537.36',
('Linux', 'Yandex.Browser 16.2.0.1818'),
{'os': {'name': 'Linux'}, 'bot': False, 'browser': {'version': '16.2.0.1818', 'name': 'Yandex.Browser'}}),
('Mozilla/5.0 (Linux; Android 8.0.0; Nexus 5X Build/OPR6.170623.023) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.84 Mobile Safari/537.36',
('Android Linux 8.0.0', 'Chrome 62.0.3202.84'),
{'bot': False, 'browser': {'name': 'Chrome', 'version': '62.0.3202.84'}, 'dist': {'name': 'Android', 'version': '8.0.0'}, 'os': {'name': 'Linux'}}),
('Mozilla/5.0 (Android 6.0.1; Mobile; rv:63.0) Gecko/63.0 Firefox/63.0',
('Android 6.0.1', 'Firefox 63.0'),
{'dist': {'name': 'Android', 'version': '6.0.1'}, 'bot': False, 'browser': {'name': 'Firefox', 'version': '63.0'}}),
)
class TestHAP(unittest.TestCase):
def setUp(self):
self.harass_repeat = 1000
self.data = data
def test_simple_detect(self):
for agent, simple_res, res in data:
self.assertEqual(simple_detect(agent), simple_res)
def test_detect(self):
for agent, simple_res, res in data:
detected = detect(agent)
del detected['platform']
self.assertEqual(detected, res)
def test_bot(self):
s = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
d = detect(s)
self.assertTrue(d['bot'])
def test_harass(self):
then = time.time()
for agent, simple_res, res in data * self.harass_repeat:
detect(agent)
time_taken = time.time() - then
no_of_tests = len(self.data) * self.harass_repeat
print("\nTime taken for %s detections: %s" %
(no_of_tests, time_taken))
print("Time taken for single detection: %f" %
(time_taken / (len(self.data) * self.harass_repeat)))
def test_fill_none(self):
self.assertEqual(detect(''), {'platform': {'version': None, 'name': None}}) # default
self.assertEqual(detect('', fill_none=False), {'platform': {'version': None, 'name': None}})
result = detect('', fill_none=True)
self.assertEqual(result['os']['name'], None)
self.assertEqual(result['browser']['version'], None)
result = detect('Linux; Android', fill_none=True)
self.assertEqual(result['os']['name'], 'Linux')
self.assertEqual(result['os']['version'], None)
self.assertEqual(result['browser']['name'], 'AndroidBrowser')
self.assertEqual(result['browser']['version'], None)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"time.time"
] | [((17350, 17365), 'unittest.main', 'unittest.main', ([], {}), '()\n', (17363, 17365), False, 'import unittest\n'), ((16219, 16230), 'time.time', 'time.time', ([], {}), '()\n', (16228, 16230), False, 'import time\n'), ((16343, 16354), 'time.time', 'time.time', ([], {}), '()\n', (16352, 16354), False, 'import time\n')] |
"""Distributed Extension"""
import re
import os
import sys
try:
import drmaa
except:
pass
import itertools
import argparse
from cement.core import backend, handler, hook
from scilifelab.pm.core import command
LOG = backend.minimal_logger(__name__)
class DistributedCommandHandler(command.CommandHandler):
"""
This class is an implementation of the :ref:`ICommand
<scilifelab.pm.core.command>` interface.
"""
class Meta:
"""Handler meta-data"""
interface = command.ICommand
"""The interface that this class implements."""
label = 'distributed'
"""The string identifier of this handler."""
n_submitted_jobs = 0
"""The number of submitted jobs"""
jobid = None
"""The submitted jobid"""
platform_args = None
"""Platform specific arguments"""
batch_command = []
"""Batch command array"""
def command(self, cmd_args, capture=True, ignore_error=False, cwd=None, **kw):
## Is there no easier way to get at --drmaa?!?
if '--drmaa' in self.app._meta.argv:
self.drmaa(cmd_args, capture, ignore_error, cwd, **kw)
else:
pass
def _check_args(self, **kw):
pargs = kw.get('platform_args', [])
if not self.app.pargs.account and "-A" not in pargs and "--account" not in pargs:
return False
if not self.app.pargs.jobname and "-J" not in pargs and "--job-name" not in pargs:
return False
if not self.app.pargs.partition and "-p" not in pargs and "--partition" not in pargs:
return False
if not self.app.pargs.time and "-t" not in pargs and "--time" not in pargs:
return False
return True
def _save_job_id(self, idfile="JOBID", **job_args):
"""Save jobid to file in working directory"""
JOBIDFILE = os.path.join(job_args['workingDirectory'], idfile)
with open(JOBIDFILE, "w") as fh:
self.app.log.info("Saving jobid {} to file {}".format(self._meta.jobid, JOBIDFILE))
fh.write(self._meta.jobid)
def monitor(self, work_dir, idfile="JOBID"):
"""Check for existing job"""
return self._monitor_job(idfile, **{'workingDirectory':work_dir})
def _monitor_job(self, idfile="JOBID", **job_args):
"""Check if job is currently being run or in queue. For now,
the user will manually have to terminate job before proceeding"""
JOBIDFILE = os.path.join(job_args['workingDirectory'], idfile)
if not os.path.exists(JOBIDFILE):
return
self.app.log.debug("Will read {} for jobid".format(JOBIDFILE))
with open(JOBIDFILE) as fh:
jobid = fh.read()
## http://code.google.com/p/drmaa-python/wiki/Tutorial
decodestatus = {
drmaa.JobState.UNDETERMINED: 'process status cannot be determined',
drmaa.JobState.QUEUED_ACTIVE: 'job is queued and active',
drmaa.JobState.SYSTEM_ON_HOLD: 'job is queued and in system hold',
drmaa.JobState.USER_ON_HOLD: 'job is queued and in user hold',
drmaa.JobState.USER_SYSTEM_ON_HOLD: 'job is queued and in user and system hold',
drmaa.JobState.RUNNING: 'job is running',
drmaa.JobState.SYSTEM_SUSPENDED: 'job is system suspended',
drmaa.JobState.USER_SUSPENDED: 'job is user suspended',
drmaa.JobState.DONE: 'job finished normally',
drmaa.JobState.FAILED: 'job finished, but failed',
}
s = drmaa.Session()
s.initialize()
try:
status = s.jobStatus(str(jobid))
self.app.log.debug("Getting status for jobid {}".format(jobid))
self.app.log.info("{}".format(decodestatus[status]))
if status in [drmaa.JobState.QUEUED_ACTIVE, drmaa.JobState.RUNNING, drmaa.JobState.UNDETERMINED]:
self.app.log.warn("{}; please terminate job before proceeding".format(decodestatus[status]))
return True
except drmaa.errors.InternalException:
self.app.log.warn("No such jobid {}".format(jobid))
pass
s.exit()
return
def drmaa(self, cmd_args, capture=True, ignore_error=False, cwd=None, **kw):
if self.app.pargs.partition == "node" and self.app.pargs.max_node_jobs < self._meta.n_submitted_jobs:
self.app.log.info("number of submitted jobs larger than maximum number of allowed node jobs; not submitting job")
return
self._meta.n_submitted_jobs = self._meta.n_submitted_jobs + 1
if kw.get('platform_args', None):
platform_args = opt_to_dict(kw['platform_args'])
else:
platform_args = opt_to_dict([])
kw.update(**vars(self.app.pargs))
job_args = make_job_template_args(platform_args, **kw)
if not self._check_args(**kw):
self.app.log.warn("missing argument; cannot proceed with drmaa command. Make sure you provide time, account, partition, and jobname")
return
if kw.get('monitorJob', False):
if self._monitor_job(**job_args):
self.app.log.info("exiting from {}".format(__name__) )
return
command = " ".join(cmd_args)
def runpipe():
s = drmaa.Session()
s.initialize()
jt = s.createJobTemplate()
jt.remoteCommand = cmd_args[0]
jt.args = cmd_args[1:]
jt.jobName = job_args['jobname']
if os.path.isdir(job_args['outputPath']):
jt.outputPath = ":{}".format(os.path.join(os.path.abspath(job_args['outputPath']), jt.jobName + "-drmaa.out"))
else:
jt.outputPath = ":{}".format(os.path.abspath(job_args['outputPath']))
if os.path.isdir(job_args['errorPath']):
jt.errorPath = ":{}".format(os.path.join(os.path.abspath(job_args['errorPath']), jt.jobName + "-drmaa.err"))
else:
jt.errorPath = ":{}".format(os.path.abspath(job_args['errorPath']))
jt.workingDirectory = os.path.abspath(job_args['workingDirectory'])
jt.nativeSpecification = "-t {time} -p {partition} -A {account} {extra}".format(**job_args)
if kw.get('email', None):
jt.email=[kw.get('email')]
self.app.log.info("Submitting job with native specification {}".format(jt.nativeSpecification))
self.app.log.info("Working directory: {}".format(jt.workingDirectory))
self.app.log.info("Output logging: {}".format(jt.outputPath))
self.app.log.info("Error logging: {}".format(jt.errorPath))
self._meta.jobid = s.runJob(jt)
self.app.log.info('Your job has been submitted with id ' + self._meta.jobid)
if kw.get('saveJobId', False):
self._save_job_id(**job_args)
s.deleteJobTemplate(jt)
s.exit()
if self.app.pargs.batch:
self._meta.batch_command.append(command)
else:
return self.dry(command, runpipe)
def opt_to_dict(opts):
"""Transform option list to a dictionary.
:param opts: option list
:returns: option dictionary
"""
if isinstance(opts, dict):
return
args = list(itertools.chain.from_iterable([x.split("=") for x in opts]))
opt_d = {k: True if v.startswith('-') else v
for k,v in zip(args, args[1:]+["--"]) if k.startswith('-')}
return opt_d
def convert_to_drmaa_time(t):
"""Convert time assignment to format understood by drmaa.
In particular transforms days to hours if provided format is
d-hh:mm:ss. Also transforms mm:ss to 00:mm:ss.
:param t: time string
:returns: converted time string formatted as hh:mm:ss or None if
time string is malformatted
"""
if not t:
return None
m = re.search("(^[0-9]+\-)?([0-9]+:)?([0-9]+):([0-9]+)", t)
if not m:
return None
days = None
if m.group(1):
days = m.group(1).rstrip("-")
hours = None
if m.group(2):
hours = m.group(2).rstrip(":")
minutes = m.group(3)
seconds = m.group(4)
if days:
hours = 24 * int(days) + int(hours)
else:
if not hours:
hours = "00"
if len(str(hours)) == 1:
hours = "0" + hours
if len(str(minutes)) == 1:
minutes = "0" + minutes
t_new = "{}:{}:{}".format(hours, minutes, seconds)
return t_new
def make_job_template_args(opt_d, **kw):
"""Given a dictionary of arguments, update with kw dict that holds arguments passed to argv.
:param opt_d: dictionary of option key/value pairs
:param kw: dictionary of program arguments
:returns: dictionary of job arguments
"""
job_args = {}
job_args['jobname'] = kw.get('jobname', None) or opt_d.get('-J', None) or opt_d.get('--job-name', None)
job_args['time'] = kw.get('time', None) or opt_d.get('-t', None) or opt_d.get('--time', None)
job_args['time'] = convert_to_drmaa_time(job_args['time'])
job_args['partition'] = kw.get('partition', None) or opt_d.get('-p', None) or opt_d.get('--partition', None)
job_args['account'] = kw.get('account', None) or opt_d.get('-A', None) or opt_d.get('--account', None)
job_args['outputPath'] = kw.get('outputPath', None) or opt_d.get('--output', None) or opt_d.get('-o', os.curdir)
job_args['errorPath'] = kw.get('errorPath', None) or opt_d.get('--error', None) or opt_d.get('-e', os.curdir)
job_args['workingDirectory'] = kw.get('workingDirectory', None) or opt_d.get('-D', os.curdir)
job_args['email'] = kw.get('email', None) or opt_d.get('--mail-user', None)
invalid_keys = ["--mail-user", "--mail-type", "-o", "--output", "-D", "--workdir", "-J", "--job-name", "-p", "--partition", "-t", "--time", "-A", "--account", "-e", "--error"]
extra_keys = [x for x in opt_d.keys() if x not in invalid_keys]
extra_args = ["{}={}".format(x, opt_d[x]) if x.startswith("--") else "{} {}".format(x, opt_d[x]) for x in extra_keys]
job_args['extra'] = kw.get('extra_args', None) or extra_args
job_args['extra'] = " ".join(job_args['extra'])
return job_args
def add_drmaa_option(app):
"""
Adds the '--drmaa' argument to the argument object.
:param app: The application object.
"""
app.args.add_argument('--drmaa', dest='cmd_handler',
action='store_const', help='toggle drmaa command handler', const='drmaa')
def add_shared_distributed_options(app):
"""
Adds shared distributed arguments to the argument object.
:param app: The application object.
"""
group = app.args.add_argument_group('distributed', 'Options for distributed execution.')
group.add_argument('-A', '--account', type=str,
action='store', help='job account', default=None)
group.add_argument('--jobname', type=str,
action='store', help='job name', default=None)
group.add_argument('-t', '--time',
action='store', help='time limit', default=None)
group.add_argument('--partition', type=str,
action='store', help='partition (node, core or devel)', default=None)
group.add_argument('--extra_args', type=str, nargs=argparse.REMAINDER,
action='store', help='extra arguments to pass to drmaa native specification. NOTE: must be supplied last since it uses remaining part of argument list', default=None)
group.add_argument('--max_node_jobs', type=int, default=10,
action='store', help='maximum number of node jobs (default 10)')
group.add_argument('--email', help="set user email address", action="store", default=None, type=str)
group.add_argument('--batch', help="submit jobs as a batch, useful for submitting a number of jobs to the same node", action="store_true", default=False)
def set_distributed_handler(app):
"""
Overrides the configured command handler if ``--drmaa`` is passed at the
command line.
:param app: The application object.
"""
if '--drmaa' in app._meta.argv:
app._meta.cmd_handler = 'distributed'
app._setup_cmd_handler()
def run_batch_command(app):
"""
If option 'batch' was set, run commands stored in batch_command
variable.
NOTE: currently runs jobs *sequentially*. One could imagine adding
a pipe to 'gnus parallel' for single-core jobs.
:param app: The application object.
"""
if not app.pargs.batch:
return
command = ";\n".join(app.cmd._meta.batch_command)
app.pargs.batch = False
app.cmd.command([command], **{'platform_args':{}, 'saveJobId':True, 'workingDirectory':os.curdir})
def load():
"""Called by the framework when the extension is 'loaded'."""
if not os.getenv("DRMAA_LIBRARY_PATH"):
LOG.debug("No environment variable $DRMAA_LIBRARY_PATH: loading {} failed".format(__name__))
return
hook.register('post_setup', add_drmaa_option)
hook.register('post_setup', add_shared_distributed_options)
hook.register('pre_run', set_distributed_handler)
hook.register('post_run', run_batch_command)
handler.register(DistributedCommandHandler)
| [
"os.path.exists",
"os.getenv",
"os.path.join",
"cement.core.handler.register",
"os.path.isdir",
"cement.core.hook.register",
"cement.core.backend.minimal_logger",
"os.path.abspath",
"drmaa.Session",
"re.search"
] | [((226, 258), 'cement.core.backend.minimal_logger', 'backend.minimal_logger', (['__name__'], {}), '(__name__)\n', (248, 258), False, 'from cement.core import backend, handler, hook\n'), ((7977, 8033), 're.search', 're.search', (['"""(^[0-9]+\\\\-)?([0-9]+:)?([0-9]+):([0-9]+)"""', 't'], {}), "('(^[0-9]+\\\\-)?([0-9]+:)?([0-9]+):([0-9]+)', t)\n", (7986, 8033), False, 'import re\n'), ((13146, 13191), 'cement.core.hook.register', 'hook.register', (['"""post_setup"""', 'add_drmaa_option'], {}), "('post_setup', add_drmaa_option)\n", (13159, 13191), False, 'from cement.core import backend, handler, hook\n'), ((13196, 13255), 'cement.core.hook.register', 'hook.register', (['"""post_setup"""', 'add_shared_distributed_options'], {}), "('post_setup', add_shared_distributed_options)\n", (13209, 13255), False, 'from cement.core import backend, handler, hook\n'), ((13260, 13309), 'cement.core.hook.register', 'hook.register', (['"""pre_run"""', 'set_distributed_handler'], {}), "('pre_run', set_distributed_handler)\n", (13273, 13309), False, 'from cement.core import backend, handler, hook\n'), ((13314, 13358), 'cement.core.hook.register', 'hook.register', (['"""post_run"""', 'run_batch_command'], {}), "('post_run', run_batch_command)\n", (13327, 13358), False, 'from cement.core import backend, handler, hook\n'), ((13363, 13406), 'cement.core.handler.register', 'handler.register', (['DistributedCommandHandler'], {}), '(DistributedCommandHandler)\n', (13379, 13406), False, 'from cement.core import backend, handler, hook\n'), ((1917, 1967), 'os.path.join', 'os.path.join', (["job_args['workingDirectory']", 'idfile'], {}), "(job_args['workingDirectory'], idfile)\n", (1929, 1967), False, 'import os\n'), ((2525, 2575), 'os.path.join', 'os.path.join', (["job_args['workingDirectory']", 'idfile'], {}), "(job_args['workingDirectory'], idfile)\n", (2537, 2575), False, 'import os\n'), ((3601, 3616), 'drmaa.Session', 'drmaa.Session', ([], {}), '()\n', (3614, 3616), False, 'import drmaa\n'), ((12993, 13024), 'os.getenv', 'os.getenv', (['"""DRMAA_LIBRARY_PATH"""'], {}), "('DRMAA_LIBRARY_PATH')\n", (13002, 13024), False, 'import os\n'), ((2591, 2616), 'os.path.exists', 'os.path.exists', (['JOBIDFILE'], {}), '(JOBIDFILE)\n', (2605, 2616), False, 'import os\n'), ((5382, 5397), 'drmaa.Session', 'drmaa.Session', ([], {}), '()\n', (5395, 5397), False, 'import drmaa\n'), ((5602, 5639), 'os.path.isdir', 'os.path.isdir', (["job_args['outputPath']"], {}), "(job_args['outputPath'])\n", (5615, 5639), False, 'import os\n'), ((5887, 5923), 'os.path.isdir', 'os.path.isdir', (["job_args['errorPath']"], {}), "(job_args['errorPath'])\n", (5900, 5923), False, 'import os\n'), ((6187, 6232), 'os.path.abspath', 'os.path.abspath', (["job_args['workingDirectory']"], {}), "(job_args['workingDirectory'])\n", (6202, 6232), False, 'import os\n'), ((5831, 5870), 'os.path.abspath', 'os.path.abspath', (["job_args['outputPath']"], {}), "(job_args['outputPath'])\n", (5846, 5870), False, 'import os\n'), ((6112, 6150), 'os.path.abspath', 'os.path.abspath', (["job_args['errorPath']"], {}), "(job_args['errorPath'])\n", (6127, 6150), False, 'import os\n'), ((5699, 5738), 'os.path.abspath', 'os.path.abspath', (["job_args['outputPath']"], {}), "(job_args['outputPath'])\n", (5714, 5738), False, 'import os\n'), ((5982, 6020), 'os.path.abspath', 'os.path.abspath', (["job_args['errorPath']"], {}), "(job_args['errorPath'])\n", (5997, 6020), False, 'import os\n')] |
from game_data_gatherer import GameDataGatherer
class GameAnalyzer:
def __add__(self, other):
if not isinstance(other, GameAnalyzer):
raise Exception("Invalid operation '__add__' on type " + str(type(self)) + " and " + str(type(other)))
for team in other.analysis:
if team not in self.analysis:
self.analysis[team] = {
"picks": other.analysis[team]["picks"],
"bans": other.analysis[team]["bans"]
}
continue
for pick in other.analysis[team]["picks"]:
if pick not in self.analysis[team]["picks"]:
self.analysis[team]["picks"][pick] = {"occurrances": 0, "wins": 0, "winrate": 0}
self.analysis[team]["picks"][pick]["wins"] += other.analysis[team]["picks"][pick]["wins"]
self.analysis[team]["picks"][pick]["occurrances"] += 1
self.analysis[team]["picks"][pick]["winrate"] = int((self.analysis[team]["picks"][pick]["wins"] / self.analysis[team]["picks"][pick]["occurrances"])*100)
for ban in other.analysis[team]["bans"]:
if ban not in self.analysis[team]["bans"]:
self.analysis[team]["bans"][ban] = {"occurrances": 1}
self.analysis[team]["bans"][ban]["occurrances"] += 1
return self
def __init__(self, gameUrl):
print("Analyzing game: ", gameUrl)
gameDataGatherer = GameDataGatherer(gameUrl)
gameData = gameDataGatherer.gatherData()
self.analysis = {}
for team in gameData:
self.analysis[team] = {
"picks": self.getPickSummary(gameData[team]),
"bans": self.getBanSummary(gameData[team])
}
# print("TEAM: ", team)
# print("Analysis: ", self.analysis[team])
def getPickSummary(self, games):
summary = {}
for game in games:
for pick in game["picks"]:
if(pick == ""):
continue
if pick not in summary:
summary[pick] = {"occurrances": 0, "wins": 0, "winrate": 0}
if game["win"]:
summary[pick]["wins"] += 1
summary[pick]["occurrances"] += 1
summary[pick]["winrate"] = int((summary[pick]["wins"] / summary[pick]["occurrances"])*100)
return summary
def getBanSummary(self, games):
summary = {}
for game in games:
for ban in game["bans"]:
if(ban == ""):
continue
if ban not in summary:
summary[ban] = {"occurrances": 1}
summary[ban]["occurrances"] += 1
return summary
| [
"game_data_gatherer.GameDataGatherer"
] | [((1516, 1541), 'game_data_gatherer.GameDataGatherer', 'GameDataGatherer', (['gameUrl'], {}), '(gameUrl)\n', (1532, 1541), False, 'from game_data_gatherer import GameDataGatherer\n')] |
# coding: utf-8
"""
turktools tests
mitcho (<NAME>), <EMAIL>, May 2013
The MIT License (MIT)
Copyright (c) 2013 <NAME>
See readme for license block
"""
from __future__ import print_function
import unittest, sys
# todo: import doctest
runner = unittest.TextTestRunner(verbosity=1 + sys.argv.count('-v'))
suite = unittest.TestLoader().loadTestsFromNames([
'tests.test_shared',
'tests.test_lister',
'tests.test_templater',
'tests.test_decoder',
])
raise SystemExit(not runner.run(suite).wasSuccessful())
| [
"sys.argv.count",
"unittest.TestLoader"
] | [((333, 354), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (352, 354), False, 'import unittest, sys\n'), ((302, 322), 'sys.argv.count', 'sys.argv.count', (['"""-v"""'], {}), "('-v')\n", (316, 322), False, 'import unittest, sys\n')] |
import collections
from supriya.enums import CalculationRate
from supriya.synthdefs import UGen
class PulseDivider(UGen):
"""
::
>>> pulse_divider = supriya.ugens.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider
PulseDivider.ar()
"""
### CLASS VARIABLES ###
_ordered_input_names = collections.OrderedDict(
'trigger',
'div',
'start',
)
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
div=2,
start=0,
trigger=0,
):
UGen.__init__(
self,
calculation_rate=calculation_rate,
div=div,
start=start,
trigger=trigger,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
div=2,
start=0,
trigger=0,
):
"""
Constructs an audio-rate PulseDivider.
::
>>> pulse_divider = supriya.ugens.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider
PulseDivider.ar()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
div=div,
start=start,
trigger=trigger,
)
return ugen
@classmethod
def kr(
cls,
div=2,
start=0,
trigger=0,
):
"""
Constructs a control-rate PulseDivider.
::
>>> pulse_divider = supriya.ugens.PulseDivider.kr(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider
PulseDivider.kr()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.CONTROL
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
div=div,
start=start,
trigger=trigger,
)
return ugen
### PUBLIC PROPERTIES ###
@property
def div(self):
"""
Gets `div` input of PulseDivider.
::
>>> pulse_divider = supriya.ugens.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider.div
2.0
Returns ugen input.
"""
index = self._ordered_input_names.index('div')
return self._inputs[index]
@property
def start(self):
"""
Gets `start` input of PulseDivider.
::
>>> pulse_divider = supriya.ugens.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider.start
0.0
Returns ugen input.
"""
index = self._ordered_input_names.index('start')
return self._inputs[index]
@property
def trigger(self):
"""
Gets `trigger` input of PulseDivider.
::
>>> pulse_divider = supriya.ugens.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider.trigger
0.0
Returns ugen input.
"""
index = self._ordered_input_names.index('trigger')
return self._inputs[index]
| [
"collections.OrderedDict",
"supriya.synthdefs.UGen.__init__"
] | [((411, 461), 'collections.OrderedDict', 'collections.OrderedDict', (['"""trigger"""', '"""div"""', '"""start"""'], {}), "('trigger', 'div', 'start')\n", (434, 461), False, 'import collections\n'), ((693, 790), 'supriya.synthdefs.UGen.__init__', 'UGen.__init__', (['self'], {'calculation_rate': 'calculation_rate', 'div': 'div', 'start': 'start', 'trigger': 'trigger'}), '(self, calculation_rate=calculation_rate, div=div, start=start,\n trigger=trigger)\n', (706, 790), False, 'from supriya.synthdefs import UGen\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-06-30 10:19:04
# @Author : gwentmaster(<EMAIL>)
# I regret in my life
"""summary
"""
import json
import os
import re
from collections import defaultdict
from typing import Any, Dict, List, Generator, Union
import click
TEMPLATE = "INSERT INTO {table} ({columns})\nVALUES ({values});\n\n"
def judge_file(file: str, type_: str) -> bool:
if not os.path.isfile(file):
return False
if not file.endswith(f".{type_}"):
return False
return True
def judge_type(data: str) -> Union[int, float, str]:
if data in ("null", "NULL"):
return None
try:
result = int(data)
return result
except ValueError:
pass
try:
result = float(data)
return result
except ValueError:
pass
if (data[0] in ("'", "\"")) and (data[-1] == data[0]):
return data[1:-1]
return data
def gen_value(dic: Dict, keys: List[str], need_repr: bool = True) -> Generator:
for key in keys:
data = dic[key]
if data is None:
yield "NULL"
continue
if data is True:
yield "1"
continue
if data is False:
yield "0"
continue
if need_repr:
yield repr(dic[key])
else:
yield str(dic[key])
def json2sql(json_file: str, target: str = "data.sql") -> None:
if not judge_file(json_file, "json"):
print("not a json file")
return
with open(json_file, encoding="utf8") as f:
data = json.load(f)
sql = open(target, "wb")
for table in data:
for obj_dic in data[table]:
keys = sorted(obj_dic)
columns = ", ".join(keys)
values = ", ".join(gen_value(obj_dic, keys))
sql.write(TEMPLATE.format(table=table,
columns=columns,
values=values).encode("utf8"))
sql.close()
def sql2json(sql_file: str, target: str = "data.json") -> None:
if not judge_file(sql_file, "sql"):
print("not a sql file")
return
pattern = re.compile(
r"INSERT\sINTO\s(.+)\s\((.+)\)[\s\n]+VALUES\s\((.+)\)"
)
data_dic = defaultdict(list)
with open(sql_file, encoding="utf8") as f:
sqls = f.read().split(";")
for sql in sqls:
search = pattern.search(sql)
if search:
table = search.group(1)
columns = search.group(2).split(", ")
raw_values = search.group(3).split(", ")
values = []
for v in raw_values:
values.append(judge_type(v))
data_dic[table].append(dict(zip(columns, values)))
with open(target, "wb") as t:
t.write(
json.dumps(data_dic, ensure_ascii=False, indent=2)
.encode("utf8")
)
def _json2csv(data: Dict, key: str, target: str):
columns = set()
data_li = []
for d in data[key]:
columns.update(d.keys())
temp_dic = defaultdict(lambda: "") # type: Dict[str, Any]
temp_dic.update(d)
data_li.append(temp_dic)
columns = sorted(columns)
t = open(target, "wb")
t.write(f'{",".join(columns)}\n'.encode("utf-8"))
for d in data_li:
t.write(
f'{",".join(gen_value(d, columns, need_repr=False))}\n'
.encode("utf-8")
)
t.close()
def json2csv(json_file: str, target: str = "data.csv") -> None:
if not judge_file(json_file, "json"):
print("not a json file")
return
with open(json_file, encoding="utf8") as f:
data = json.load(f)
keys = list(data.keys())
if len(keys) > 1:
while True:
try:
for i, key in enumerate(keys):
print(f"{i}. {key}")
print(f"{len(keys)}. 全部")
choose = int(input("请选择想转换的数据: "))
except ValueError:
pass
else:
if choose in range(len(keys) + 1):
break
if choose == len(keys):
for key in keys:
_json2csv(data, key, f"{key}.csv")
return None
else:
choose = 0
_json2csv(data, keys[choose], target)
def csv2json(
csv_file: str,
target: str = "data.json",
table: str = "data"
) -> None:
if not judge_file(csv_file, "csv"):
print("not a csv file")
return
data = {table: []} # type: Dict[str, List]
with open(csv_file, encoding="utf8") as f:
for i, line in enumerate(f.readlines()):
line = line.strip()
if i == 0:
columns = line.split(",")
continue
values = []
for v in line.split(","):
values.append(judge_type(v))
data[table].append(dict(zip(columns, values)))
with open(target, "wb") as t:
t.write(json.dumps(data, ensure_ascii=False, indent=2).encode("utf8"))
@click.command()
@click.argument("file")
@click.option(
"--reverse",
is_flag=True,
default=False,
help="是否将其他文件转成json"
)
@click.option(
"--csv",
is_flag=True,
default=False,
help="json与csv转化"
)
@click.option(
"--out",
type=str,
default=None,
help="输出文件名",
show_default=False
)
def main(
file: str,
reverse: bool,
csv: bool,
out: str
):
if out is None:
out = "data"
if (reverse is False) and (csv is False):
json2sql(file, out.rstrip(".sql") + ".sql")
elif (reverse is True) and (csv is False):
sql2json(file, out.rstrip(".json") + ".json")
elif (reverse is False) and (csv is True):
json2csv(file, out.rstrip(".csv") + ".csv")
else:
csv2json(file, out.rstrip(".json") + ".json")
if __name__ == "__main__":
main()
| [
"click.argument",
"re.compile",
"click.option",
"json.dumps",
"os.path.isfile",
"collections.defaultdict",
"json.load",
"click.command"
] | [((5105, 5120), 'click.command', 'click.command', ([], {}), '()\n', (5118, 5120), False, 'import click\n'), ((5122, 5144), 'click.argument', 'click.argument', (['"""file"""'], {}), "('file')\n", (5136, 5144), False, 'import click\n'), ((5146, 5222), 'click.option', 'click.option', (['"""--reverse"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""是否将其他文件转成json"""'}), "('--reverse', is_flag=True, default=False, help='是否将其他文件转成json')\n", (5158, 5222), False, 'import click\n'), ((5242, 5311), 'click.option', 'click.option', (['"""--csv"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""json与csv转化"""'}), "('--csv', is_flag=True, default=False, help='json与csv转化')\n", (5254, 5311), False, 'import click\n'), ((5331, 5410), 'click.option', 'click.option', (['"""--out"""'], {'type': 'str', 'default': 'None', 'help': '"""输出文件名"""', 'show_default': '(False)'}), "('--out', type=str, default=None, help='输出文件名', show_default=False)\n", (5343, 5410), False, 'import click\n'), ((2189, 2264), 're.compile', 're.compile', (['"""INSERT\\\\sINTO\\\\s(.+)\\\\s\\\\((.+)\\\\)[\\\\s\\\\n]+VALUES\\\\s\\\\((.+)\\\\)"""'], {}), "('INSERT\\\\sINTO\\\\s(.+)\\\\s\\\\((.+)\\\\)[\\\\s\\\\n]+VALUES\\\\s\\\\((.+)\\\\)')\n", (2199, 2264), False, 'import re\n'), ((2286, 2303), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2297, 2303), False, 'from collections import defaultdict\n'), ((421, 441), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (435, 441), False, 'import os\n'), ((1597, 1609), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1606, 1609), False, 'import json\n'), ((3128, 3152), 'collections.defaultdict', 'defaultdict', (["(lambda : '')"], {}), "(lambda : '')\n", (3139, 3152), False, 'from collections import defaultdict\n'), ((3728, 3740), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3737, 3740), False, 'import json\n'), ((2873, 2923), 'json.dumps', 'json.dumps', (['data_dic'], {'ensure_ascii': '(False)', 'indent': '(2)'}), '(data_dic, ensure_ascii=False, indent=2)\n', (2883, 2923), False, 'import json\n'), ((5039, 5085), 'json.dumps', 'json.dumps', (['data'], {'ensure_ascii': '(False)', 'indent': '(2)'}), '(data, ensure_ascii=False, indent=2)\n', (5049, 5085), False, 'import json\n')] |
import numpy
import time
import threading
import matplotlib.pyplot
from matplotlib.animation import FuncAnimation
class VisualizationWindow:
def __init__(self, signal_collector):
self.figure, self.axes = matplotlib.pyplot.subplots(7, 1, sharex=True)
self.figure.subplots_adjust(hspace=0)
self.signal_collector = signal_collector
def create_empty_line(ax_index, *args):
return self.axes[ax_index].plot([], [], *args)[0]
self.acceleration_lines = [create_empty_line(0), create_empty_line(0), create_empty_line(0)]
self.breathing_line = create_empty_line(1)
self.ecg_line = create_empty_line(2)
self.respiration_rate_line = create_empty_line(3, "+")
self.heart_rate_line = create_empty_line(4, "+")
self.heartbeat_interval_line = create_empty_line(5, "+")
self.activity_line = create_empty_line(6, "+")
self.artists = self.acceleration_lines + [self.breathing_line, self.ecg_line, self.respiration_rate_line, self.heart_rate_line, self.heartbeat_interval_line, self.activity_line]
self.axes[0].set_ylim((-4, 4))
self.axes[1].set_ylim((-1000, 1000))
self.axes[2].set_ylim((-500, 500))
self.axes[3].set_ylim((0, 50))
self.axes[4].set_ylim((0, 120))
self.axes[5].set_ylim((0, 2))
self.axes[6].set_ylim((0, 2))
def update_plots(self, framedata):
for stream_name, stream in self.signal_collector.iterate_signal_streams():
signal_value_array = numpy.array(stream.samples, dtype=float)
x_values = numpy.arange(len(signal_value_array), dtype=float)
x_values /= stream.samplerate
x_values += stream.end_timestamp - len(signal_value_array) / stream.samplerate
if stream_name == "acceleration":
for line_i, line in enumerate(self.acceleration_lines):
line.set_xdata(x_values)
line.set_ydata(signal_value_array[:, line_i])
elif stream_name == "breathing":
self.breathing_line.set_xdata(x_values)
self.breathing_line.set_ydata(signal_value_array)
elif stream_name == "ecg":
self.ecg_line.set_xdata(x_values)
self.ecg_line.set_ydata(signal_value_array)
for stream_name, event_list in self.signal_collector.iterate_event_streams():
if len(event_list) == 0:
continue
event_data_array = numpy.array(event_list, dtype=float)
event_timestamps = event_data_array[:, 0]
event_values = event_data_array[:, 1]
event_line_object_map = {"heart_rate": self.heart_rate_line,
"respiration_rate": self.respiration_rate_line,
"heartbeat_interval": self.heartbeat_interval_line,
"activity": self.activity_line}
event_line_object = event_line_object_map[stream_name]
if event_line_object is not None:
event_line_object.set_xdata(event_timestamps)
event_line_object.set_ydata(event_values)
now = time.time()
self.axes[0].set_xlim((now - 115, now + 5))
return self.artists
def show(self):
anim = FuncAnimation(self.figure, self.update_plots, interval=1000, blit=False)
matplotlib.pyplot.show()
| [
"matplotlib.animation.FuncAnimation",
"numpy.array",
"time.time"
] | [((3421, 3432), 'time.time', 'time.time', ([], {}), '()\n', (3430, 3432), False, 'import time\n'), ((3562, 3634), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['self.figure', 'self.update_plots'], {'interval': '(1000)', 'blit': '(False)'}), '(self.figure, self.update_plots, interval=1000, blit=False)\n', (3575, 3634), False, 'from matplotlib.animation import FuncAnimation\n'), ((1588, 1628), 'numpy.array', 'numpy.array', (['stream.samples'], {'dtype': 'float'}), '(stream.samples, dtype=float)\n', (1599, 1628), False, 'import numpy\n'), ((2643, 2679), 'numpy.array', 'numpy.array', (['event_list'], {'dtype': 'float'}), '(event_list, dtype=float)\n', (2654, 2679), False, 'import numpy\n')] |
from math import trunc
num=float(input('Digite um número: '))
truncado=trunc(num)
print('O resultado truncado de {} é {}.'.format(num,truncado))
| [
"math.trunc"
] | [((71, 81), 'math.trunc', 'trunc', (['num'], {}), '(num)\n', (76, 81), False, 'from math import trunc\n')] |
import graphene
from graphql_extensions.auth.decorators import login_required
from ..helpers.permission_required import role_required, token_required
from ..helpers.validate_object_id import validate_object_id
from ..helpers.validation_errors import error_dict
from ..helpers.constants import SUCCESS_ACTION
from .models import (
Tags,
Author,
Book,
Category,
Publisher,
Premium
)
from .validators.validate_input import BookValidations
from .object_types import (
BookInput,BookType,
AuthorType,AuthorInput,
BookTagsType,BookTagsInput,
BookCategoryType,BookCategoryInput,
PublisherInput,PublisherType,
BookPremiumType,BookPremiumInput
)
from datetime import datetime
class CreateBook(graphene.Mutation):
'''Handle addition of a book and handle saving it to the db'''
# items that the mutation will return
book = graphene.Field(BookType)
status = graphene.String()
message = graphene.String()
class Arguments:
'''Arguments to be passed in during the book creation'''
input = BookInput(required=True)
@staticmethod
@token_required
@login_required
def mutate(self, info, **kwargs):
'''Mutation for user creation. Actual saving happens here'''
error_msg = error_dict['admin_only'].format("Add a Book")
role_required(info.context.user, ['admin', 'manager'], error_msg)
validator = BookValidations()
data = validator.validate_book_data(
kwargs.get("input", ''))
authors = data.pop("author", [])
tags = data.pop("tags",[])
categories = data.pop("categories",[])
new_book = Book(**data)
new_book.save()
for author in authors:
author_ = Author.objects.get(id=author)
new_book.author.add(author_)
for tag in tags:
tag_ = Tags.objects.get(id=tag)
new_book.tags.add(tag_)
for category in categories:
category_ = Category.objects.get(id=category)
new_book.categories.add(category_)
return CreateBook(status="Success", book=new_book,
message=SUCCESS_ACTION.format("Book added"))
class UpdateBook(graphene.Mutation):
"""
handles updating of books
"""
book_val = graphene.Field(BookType)
status = graphene.String()
message = graphene.String()
class Arguments:
input = BookInput(required=True)
id = graphene.String(required=True)
@staticmethod
@token_required
@login_required
def mutate(root,info,**kwargs):
error_msg=error_dict['admin_only'].format('update a book')
role_required(info.context.user,['admin','manager'],error_msg)
id = kwargs.get('id',None)
book = validate_object_id(id,Book,"Book")
data = kwargs['input']
tags = data.pop('tags',[])
categories = data.pop('categories',[])
for tag in tags:
add_tag = Tags(**tag)
add_tag.save()
book.tags.add(add_tag)
for category in categories:
add_category = Category(**category)
add_category.save()
book.categories.add(add_category)
for (key,value) in data.items():
setattr(book,key,value)
book.save()
status = "Success"
message = SUCCESS_ACTION.format("Book Entry has been updated")
return UpdateBook(status = status,book_val=book,message=message)
class CreatePremiumBooks(graphene.Mutation):
'''Handle addition of a book and handle saving it to the db'''
# items that the mutation will return
premium_book = graphene.Field(BookPremiumType)
status = graphene.String()
message = graphene.String()
class Arguments:
'''Arguments to be passed in during the book creation'''
input = BookPremiumInput(required=True)
@staticmethod
@token_required
@login_required
def mutate(self, info, **kwargs):
'''Mutation for user creation. Actual saving happens here'''
error_msg = error_dict['admin_only'].format("Add Premium Books")
role_required(info.context.user, ['admin', 'manager'], error_msg)
validator = BookValidations()
data = validator.validate_premium_book_data(
kwargs.get("input", ''))
books = data.pop("content",[])
new_premium_book = Premium(**data)
new_premium_book.save()
for book in books:
book_ = Book(**book)
new_book.content.add(book_)
return CreatePremiumBooks(status="Success", premium_book=new_premium_book,
message=SUCCESS_ACTION.format("Premium Books added"))
class UpdatePremiumBook(graphene.Mutation):
"""
handles updating of books
"""
premium_book_set = graphene.Field(BookPremiumType)
status = graphene.String()
message = graphene.String()
class Arguments:
input = BookPremiumInput(required=True)
id = graphene.String(required=True)
@staticmethod
@token_required
@login_required
def mutate(root,info,**kwargs):
error_msg=error_dict['admin_only'].format('update a premium book')
role_required(info.context.user,['admin','manager'],error_msg)
id = kwargs.get('id',None)
premium_book = validate_object_id(id,Premium,"Premium")
data = kwargs['input']
books = data.pop('content',[])
for book in books:
add_book = Book(**book)
add_book.save()
premium_book.content.add(add_book)
for (key,value) in data.items():
setattr(premium_book,key,value)
premium_book.save()
status = "Success"
message = SUCCESS_ACTION.format("Premium Book Entry has been updated")
return UpdatePremiumBook(status = status,premium_book_set=premium_book,message=message)
class CreatePulisher(graphene.Mutation):
"""
handles addition of new Publisher
"""
publisher = graphene.Field(PublisherType)
status = graphene.String()
message = graphene.String()
class Arguments:
'''Arguments to be passed in during the publisher creation'''
input = PublisherInput(required=True)
@staticmethod
@token_required
@login_required
def mutate(self, info, **kwargs):
error_msg = error_dict['admin_only'].format("Add a Publisher")
role_required(info.context.user, ['admin', 'manager'], error_msg)
data = kwargs['input']
new_publisher = Publisher(**data)
new_publisher.save()
return CreatePulisher(status="Success", publisher=new_publisher,
message=SUCCESS_ACTION.format("Publisher added"))
class UpdatePublisher(graphene.Mutation):
"""
handles updating of books
"""
publisher_entry = graphene.Field(PublisherType)
status = graphene.String()
message = graphene.String()
class Arguments:
input = PublisherInput(required=True)
id = graphene.String(required=True)
@staticmethod
@token_required
@login_required
def mutate(root,info,**kwargs):
error_msg=error_dict['admin_only'].format('update publisher')
role_required(info.context.user,['admin','manager'],error_msg)
id = kwargs.get('id',None)
publisher = validate_object_id(id,Publisher,"Publisher")
data = kwargs['input']
for (key,value) in data.items():
setattr(publisher,key,value)
publisher.save()
status = "Success"
message = SUCCESS_ACTION.format("Publisher has been updated")
return UpdatePublisher(status = status,publisher_entry=publisher,message=message)
class Mutation(graphene.ObjectType):
create_book = CreateBook.Field()
create_premium_books = CreatePremiumBooks.Field()
create_publisher = CreatePulisher.Field()
update_book = UpdateBook.Field()
update_premium_book = UpdatePremiumBook.Field()
update_publisher = UpdatePublisher.Field()
| [
"graphene.String",
"graphene.Field"
] | [((875, 899), 'graphene.Field', 'graphene.Field', (['BookType'], {}), '(BookType)\n', (889, 899), False, 'import graphene\n'), ((913, 930), 'graphene.String', 'graphene.String', ([], {}), '()\n', (928, 930), False, 'import graphene\n'), ((945, 962), 'graphene.String', 'graphene.String', ([], {}), '()\n', (960, 962), False, 'import graphene\n'), ((2307, 2331), 'graphene.Field', 'graphene.Field', (['BookType'], {}), '(BookType)\n', (2321, 2331), False, 'import graphene\n'), ((2345, 2362), 'graphene.String', 'graphene.String', ([], {}), '()\n', (2360, 2362), False, 'import graphene\n'), ((2377, 2394), 'graphene.String', 'graphene.String', ([], {}), '()\n', (2392, 2394), False, 'import graphene\n'), ((3663, 3694), 'graphene.Field', 'graphene.Field', (['BookPremiumType'], {}), '(BookPremiumType)\n', (3677, 3694), False, 'import graphene\n'), ((3708, 3725), 'graphene.String', 'graphene.String', ([], {}), '()\n', (3723, 3725), False, 'import graphene\n'), ((3740, 3757), 'graphene.String', 'graphene.String', ([], {}), '()\n', (3755, 3757), False, 'import graphene\n'), ((4836, 4867), 'graphene.Field', 'graphene.Field', (['BookPremiumType'], {}), '(BookPremiumType)\n', (4850, 4867), False, 'import graphene\n'), ((4881, 4898), 'graphene.String', 'graphene.String', ([], {}), '()\n', (4896, 4898), False, 'import graphene\n'), ((4913, 4930), 'graphene.String', 'graphene.String', ([], {}), '()\n', (4928, 4930), False, 'import graphene\n'), ((6028, 6057), 'graphene.Field', 'graphene.Field', (['PublisherType'], {}), '(PublisherType)\n', (6042, 6057), False, 'import graphene\n'), ((6071, 6088), 'graphene.String', 'graphene.String', ([], {}), '()\n', (6086, 6088), False, 'import graphene\n'), ((6103, 6120), 'graphene.String', 'graphene.String', ([], {}), '()\n', (6118, 6120), False, 'import graphene\n'), ((6876, 6905), 'graphene.Field', 'graphene.Field', (['PublisherType'], {}), '(PublisherType)\n', (6890, 6905), False, 'import graphene\n'), ((6919, 6936), 'graphene.String', 'graphene.String', ([], {}), '()\n', (6934, 6936), False, 'import graphene\n'), ((6951, 6968), 'graphene.String', 'graphene.String', ([], {}), '()\n', (6966, 6968), False, 'import graphene\n'), ((2471, 2501), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (2486, 2501), False, 'import graphene\n'), ((5014, 5044), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (5029, 5044), False, 'import graphene\n'), ((7050, 7080), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (7065, 7080), False, 'import graphene\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import h5py as h5
import os.path as fs
import keras
from keras.models import Sequential
from keras.utils import np_utils
from keras.layers.core import Dense, Activation, Dropout
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
from sklearn.linear_model import Perceptron
from sklearn.metrics import f1_score
from scipy.spatial import distance
from sklearn.preprocessing import normalize
from sklearn.metrics import accuracy_score
import logging as logg
from sklearn.decomposition import PCA
from sklearn import preprocessing
import umap
#%%
def readHDF5file(PathToSave, SavedFileName, list_group_name):
data = []
ff = h5.File(fs.join(PathToSave, SavedFileName), 'r')
for group in list_group_name:
data.append(ff[group][...])
ff.close()
return data
def saveHDF5file(PathToSave, SavedFileName, list_group_name, data):
num_group = len(list_group_name)
num_data = len(data)
if num_group != num_data:
raise RuntimeError('Group name list and data list length do not match!')
ff = h5.File(fs.join(PathToSave, SavedFileName), 'w')
for i, group in enumerate(list_group_name):
ff.create_dataset(group, data = data[i])
ff.close()
return None
#%%
def pca_result(activations, n_comp):
embedding = PCA(n_components= n_comp).fit_transform(activations)
return embedding
def umap_result(activations, n_comp):
embedding = umap.UMAP(n_components=n_comp).fit_transform(activations)
return embedding
#%%
def TrainPerceptron(latent_space):
n = len(latent_space)
shape_ls = latent_space.shape[1]
labels = np.empty((n, 1), dtype = np.int32)
labels[:15000], labels[15000:30000], labels[30000:] = 0, 1, 2
#labels[:5000], labels[5000:10000], labels[10000:] = 0, 1, 2
y_train = np_utils.to_categorical(labels)
standardized_latent_space = preprocessing.scale(latent_space)
model = Sequential()
model.add(Dense(3, input_dim= shape_ls))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='Nadam')
model.summary()
model.fit(standardized_latent_space, y_train, epochs = 250, batch_size=128, validation_split=0.3, shuffle = True, verbose=2)
optim = keras.optimizers.SGD(lr=0.02, decay=1e-2/300)
model.compile(loss='categorical_crossentropy', optimizer=optim)
model.fit(standardized_latent_space, y_train, epochs = 300, batch_size=128, validation_split=0.3, shuffle = True, verbose=2)
predict = model.predict(standardized_latent_space, batch_size=4096)
predict = np.heaviside(predict - 0.5, 1).astype(np.int32)
score = f1_score(y_train, predict, average='micro')
return score
#%%
RootPathLatentSpace = ''
logg.basicConfig(filename=fs.join(RootPathLatentSpace, "LatentSpaceLogger.log"), level=logg.INFO)
logg
umap_shape_mnist = [0, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2]
umap_shape_unet = [0, 512, 256, 128, 64, 32, 16, 8, 4, 2]
PathToDataSet = ''
PathToModel = ''
NamesDataSet = ['']
name_loss_list = ['']
name_NN_list = ['']
num_layers = [[], []]
precision = np.ones((len(name_NN_list), len(name_loss_list), 5, len(NamesDataSet),\
7, 11, 2), dtype = np.float32)
precision = precision*(-1.)
"""
for iter_NN in range(len(name_NN_list)):
for iter_loss in range(len(name_loss_list)):
for launch_num in range(5):
for data_iter, data in enumerate(NamesDataSet):
number_layer = num_layers[iter_NN]
for li, layer_iter in enumerate(number_layer):
latent_space= readHDF5file(RootPathLatentSpace,\
'LatentSpace_Model%s_Loss%s_Launch%d_Layer%d,hdf5'%(name_NN_list[iter_NN],\
name_loss_list[iter_loss],\
launch_num + 1,\
layer_iter),\
['latent_space'])[0]
if iter_NN == 0:
compress_list = umap_shape_mnist
else:
compress_list = umap_shape_unet
for dim_iter, dim in enumerate(compress_list):
if dim != 0:
ls_pca = pca_result(latent_space, dim)
f1_score_pca = TrainPerceptron(ls_pca)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d pca score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score_pca))
precision[iter_NN, iter_loss, launch_num, data_iter, li,\
dim_iter, 0] = f1_score_pca
ls_umap = umap_result(latent_space, dim)
f1_score_umap = TrainPerceptron(ls_umap)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d umap score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score_umap))
precision[iter_NN, iter_loss, launch_num, data_iter, li,\
dim_iter, 1] = f1_score_umap
else:
f1_score = TrainPerceptron(latent_space)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score))
precision[iter_NN, iter_loss, launch_num, data_iter, li,\
dim_iter, 0] = f1_score
ff = h5.File(fs.join(RootPathLatentSpace, 'preceptron', 'perceptron.hdf5'), 'w')
ff.create_dataset('precision', precision)
ff.close()
"""
#%%
"""
NN1
"""
RootPathLatentSpace = ''
logg.basicConfig(filename=fs.join(RootPathLatentSpace, "LatentSpaceLogger.log"), level=logg.INFO)
logg
umap_shape_mnist = [0, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2]
PathToDataSet = ''
PathToModel = ''
NamesDataSet = ['OnlyColor.hdf5',\
'OnlyH.hdf5',\
'OnlyX.hdf5',\
'Only.hdf5']
name_loss_list = ['weighted_categorical_crossentropy',\
'dice_loss']
name_NN_list = ['ezConvAutoEncoderForMnist', 'UnetСircumcised',\
'UnetWithSeparableConvСircumcised']
num_layers = [6, 7]
iter_NN = 0
for iter_loss in range(len(name_loss_list)):
for launch_num in range(5):
for data_iter, data in enumerate(NamesDataSet):
number_layer = num_layers[iter_NN]
for li, layer_iter in enumerate(number_layer):
latent_space= readHDF5file(RootPathLatentSpace,\
'LatentSpace_Model%s_Loss%s_Launch%d_Layer%d,hdf5'%(name_NN_list[iter_NN],\
name_loss_list[iter_loss],\
launch_num + 1,\
layer_iter),\
['latent_space'])[0]
if iter_NN == 0:
compress_list = umap_shape_mnist
else:
compress_list = umap_shape_unet
precision = np.ones(len(compress_list), 2)
for dim_iter, dim in enumerate(compress_list):
if dim != 0:
ls_pca = pca_result(latent_space, dim)
f1_score_pca = TrainPerceptron(ls_pca)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d pca score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score_pca))
precision[dim_iter, 0] = f1_score_pca
ls_umap = umap_result(latent_space, dim)
f1_score_umap = TrainPerceptron(ls_umap)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d umap score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score_umap))
precision[dim_iter, 1] = f1_score_umap
else:
f1_score = TrainPerceptron(latent_space)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score))
precision[dim_iter, 0] = f1_score
precision[dim_iter, 1] = f1_score
ff = h5.File(fs.join(RootPathLatentSpace, 'preceptron',\
'perceptron_Model%s_Loss%s_Launch%d_Layer%d.hdf5'%(name_NN_list[0],\
name_loss_list[iter_loss],\
launch_num + 1,\
layer_iter)), 'w')
ff.create_dataset('precision', precision)
ff.close()
#%%
"""
NN2
"""
RootPathLatentSpace = ''
logg.basicConfig(filename=fs.join(RootPathLatentSpace, "LatentSpaceLogger.log"), level=logg.INFO)
logg
umap_shape_mnist = [0, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2]
PathToDataSet = ''
PathToModel = ''
NamesDataSet = ['OnlyColor.hdf5',\
'OnlyH.hdf5',\
'OnlyX.hdf5',\
'Only.hdf5']
name_loss_list = ['weighted_categorical_crossentropy',\
'dice_loss']
name_NN_list = ['ezConvAutoEncoderForMnist', 'UnetСircumcised',\
'UnetWithSeparableConvСircumcised']
num_layers = [6, 7]
iter_NN = 0
for iter_loss in range(len(name_loss_list)):
for launch_num in range(5):
for data_iter, data in enumerate(NamesDataSet):
number_layer = num_layers[iter_NN]
for li, layer_iter in enumerate(number_layer):
latent_space= readHDF5file(RootPathLatentSpace,\
'LatentSpace_Model%s_Loss%s_Launch%d_Layer%d,hdf5'%(name_NN_list[iter_NN],\
name_loss_list[iter_loss],\
launch_num + 1,\
layer_iter),\
['latent_space'])[0]
if iter_NN == 0:
compress_list = umap_shape_mnist
else:
compress_list = umap_shape_unet
precision = np.ones(len(compress_list), 2)
for dim_iter, dim in enumerate(compress_list):
if dim != 0:
ls_pca = pca_result(latent_space, dim)
f1_score_pca = TrainPerceptron(ls_pca)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d pca score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score_pca))
precision[dim_iter, 0] = f1_score_pca
ls_umap = umap_result(latent_space, dim)
f1_score_umap = TrainPerceptron(ls_umap)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d umap score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score_umap))
precision[dim_iter, 1] = f1_score_umap
else:
f1_score = TrainPerceptron(latent_space)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score))
precision[dim_iter, 0] = f1_score
precision[dim_iter, 1] = f1_score
ff = h5.File(fs.join(RootPathLatentSpace, 'preceptron',\
'perceptron_Model%s_Loss%s_Launch%d_Layer%d.hdf5'%(name_NN_list[0],\
name_loss_list[iter_loss],\
launch_num + 1,\
layer_iter)), 'w')
ff.create_dataset('precision', precision)
ff.close()
| [
"sklearn.metrics.f1_score",
"keras.layers.core.Activation",
"sklearn.decomposition.PCA",
"os.path.join",
"numpy.heaviside",
"keras.models.Sequential",
"keras.optimizers.SGD",
"keras.utils.np_utils.to_categorical",
"numpy.empty",
"umap.UMAP",
"sklearn.preprocessing.scale",
"keras.layers.core.De... | [((1697, 1729), 'numpy.empty', 'np.empty', (['(n, 1)'], {'dtype': 'np.int32'}), '((n, 1), dtype=np.int32)\n', (1705, 1729), True, 'import numpy as np\n'), ((1871, 1902), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['labels'], {}), '(labels)\n', (1894, 1902), False, 'from keras.utils import np_utils\n'), ((1933, 1966), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['latent_space'], {}), '(latent_space)\n', (1952, 1966), False, 'from sklearn import preprocessing\n'), ((1980, 1992), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1990, 1992), False, 'from keras.models import Sequential\n'), ((2294, 2341), 'keras.optimizers.SGD', 'keras.optimizers.SGD', ([], {'lr': '(0.02)', 'decay': '(0.01 / 300)'}), '(lr=0.02, decay=0.01 / 300)\n', (2314, 2341), False, 'import keras\n'), ((2675, 2718), 'sklearn.metrics.f1_score', 'f1_score', (['y_train', 'predict'], {'average': '"""micro"""'}), "(y_train, predict, average='micro')\n", (2683, 2718), False, 'from sklearn.metrics import f1_score\n'), ((780, 814), 'os.path.join', 'fs.join', (['PathToSave', 'SavedFileName'], {}), '(PathToSave, SavedFileName)\n', (787, 814), True, 'import os.path as fs\n'), ((1161, 1195), 'os.path.join', 'fs.join', (['PathToSave', 'SavedFileName'], {}), '(PathToSave, SavedFileName)\n', (1168, 1195), True, 'import os.path as fs\n'), ((2005, 2033), 'keras.layers.core.Dense', 'Dense', (['(3)'], {'input_dim': 'shape_ls'}), '(3, input_dim=shape_ls)\n', (2010, 2033), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2048, 2069), 'keras.layers.core.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (2058, 2069), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2793, 2846), 'os.path.join', 'fs.join', (['RootPathLatentSpace', '"""LatentSpaceLogger.log"""'], {}), "(RootPathLatentSpace, 'LatentSpaceLogger.log')\n", (2800, 2846), True, 'import os.path as fs\n'), ((6746, 6799), 'os.path.join', 'fs.join', (['RootPathLatentSpace', '"""LatentSpaceLogger.log"""'], {}), "(RootPathLatentSpace, 'LatentSpaceLogger.log')\n", (6753, 6799), True, 'import os.path as fs\n'), ((10863, 10916), 'os.path.join', 'fs.join', (['RootPathLatentSpace', '"""LatentSpaceLogger.log"""'], {}), "(RootPathLatentSpace, 'LatentSpaceLogger.log')\n", (10870, 10916), True, 'import os.path as fs\n'), ((1376, 1400), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_comp'}), '(n_components=n_comp)\n', (1379, 1400), False, 'from sklearn.decomposition import PCA\n'), ((1503, 1533), 'umap.UMAP', 'umap.UMAP', ([], {'n_components': 'n_comp'}), '(n_components=n_comp)\n', (1512, 1533), False, 'import umap\n'), ((2617, 2647), 'numpy.heaviside', 'np.heaviside', (['(predict - 0.5)', '(1)'], {}), '(predict - 0.5, 1)\n', (2629, 2647), True, 'import numpy as np\n'), ((10332, 10509), 'os.path.join', 'fs.join', (['RootPathLatentSpace', '"""preceptron"""', "('perceptron_Model%s_Loss%s_Launch%d_Layer%d.hdf5' % (name_NN_list[0],\n name_loss_list[iter_loss], launch_num + 1, layer_iter))"], {}), "(RootPathLatentSpace, 'preceptron', \n 'perceptron_Model%s_Loss%s_Launch%d_Layer%d.hdf5' % (name_NN_list[0],\n name_loss_list[iter_loss], launch_num + 1, layer_iter))\n", (10339, 10509), True, 'import os.path as fs\n'), ((14449, 14626), 'os.path.join', 'fs.join', (['RootPathLatentSpace', '"""preceptron"""', "('perceptron_Model%s_Loss%s_Launch%d_Layer%d.hdf5' % (name_NN_list[0],\n name_loss_list[iter_loss], launch_num + 1, layer_iter))"], {}), "(RootPathLatentSpace, 'preceptron', \n 'perceptron_Model%s_Loss%s_Launch%d_Layer%d.hdf5' % (name_NN_list[0],\n name_loss_list[iter_loss], launch_num + 1, layer_iter))\n", (14456, 14626), True, 'import os.path as fs\n')] |
import os
from datetime import timedelta
from typing import Any
class Track(object):
def __init__(self, id: str, title: str, url: str, duration: Any, filename: str):
self.id = id
self.title = title
self.url = url
self.duration = int(duration)
self.filename = filename
def details(self):
return "({duration})({id})[{title}](url={url})".format(
duration=timedelta(seconds=self.duration),
id=self.id,
title=self.title,
url=self.url).encode("utf-8")
def prettify(self):
return "({duration})[{title}]".format(
duration=timedelta(seconds=self.duration),
title=self.title).encode("utf-8")
def is_available(self):
return os.path.isfile(self.filename)
class Skip(object):
def __init__(self, limit=1):
self.limit = limit
self.register = set()
def add(self, user) -> bool:
if user in self.register:
return False
else:
self.register.add(user)
return True
def clear(self):
self.register.clear()
def ready(self) -> bool:
return len(self.register) >= self.limit
def status(self):
return "{votes} votes, {limit} needed".format(votes=len(self.register), limit=self.limit)
| [
"os.path.isfile",
"datetime.timedelta"
] | [((771, 800), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (785, 800), False, 'import os\n'), ((424, 456), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.duration'}), '(seconds=self.duration)\n', (433, 456), False, 'from datetime import timedelta\n'), ((647, 679), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.duration'}), '(seconds=self.duration)\n', (656, 679), False, 'from datetime import timedelta\n')] |
"""
Builds wheel files for the dependencies of an app, specified in requirements.txt, into the wheels/
folder of the app repo, and updates the app's JSON config specifying any generated wheels as pip
dependencies.
NOTE: If running this script with the --repair_wheels flag, make sure the script is executed from
a manylinux2014_x86_64 container https://github.com/pypa/manylinux
"""
import argparse
import json
import logging
import os
import pathlib
import random
import re
import shutil
import string
import subprocess
import sys
from collections import namedtuple
PLATFORM = 'manylinux2014_x86_64'
REPAIRED_WHEELS_REL_PATH = 'repaired-wheels'
WHEEL_PATTERN = re.compile(
r'^(?P<distribution>([A-Z0-9][A-Z0-9._-]*[A-Z0-9]))-([0-9]+\.?)+-'
r'(?P<python_version>[A-Z0-9]+\.?[A-Z0-9]+)-.+-'
r'(?P<platform>.+)\.whl$',
re.IGNORECASE)
Wheel = namedtuple('Wheel', ['file_name', 'distribution', 'python_version', 'platform'])
AppJsonWheelEntry = namedtuple('AppJsonWheel', ['module', 'input_file'])
PIP_DEPENDENCIES = 'pip_dependencies'
PIP3_DEPENDENCIES = 'pip3_dependencies'
PY2_WHEELS_DIR = PY2_TAG = 'py2'
PY3_WHEELS_DIR = PY3_TAG = 'py3'
SHARED_WHEELS_DIR = 'shared'
PY2_PY3_TAG = '{}.{}'.format(PY2_TAG, PY3_TAG)
CP2_TAG_PATTEN = re.compile(r'cp2\d?')
CP3_TAG_PATTERN = re.compile(r'cp3\d{0,2}')
AppJson = namedtuple('AppJson', ['file_name', 'content'])
APP_JSON_INDENT = 4
def _load_app_json(app_dir):
json_files = [f for f in os.listdir(app_dir)
if not f.endswith('.postman_collection.json') and f.endswith('.json')]
if len(json_files) != 1:
error_msg = 'Expected a single json file in {} but got {}'.format(app_dir, json_files)
logging.error(error_msg)
raise ValueError(error_msg)
with open(os.path.join(app_dir, json_files[0])) as f:
return AppJson(json_files[0], json.load(f))
def _repair_wheels(wheels_to_check, all_wheels, wheels_dir):
"""
Uses auditwheel to 1) check for platform wheels depending on external binary dependencies
and 2) bundle external binary dependencies into the platform wheels in necessary. Repaired
wheels are placed in a sub dir of wheels_dir by auditwheel, which we then use to replace
the original wheels at the root level of wheels_dir.
https://github.com/pypa/auditwheel
"""
if subprocess.run(['auditwheel', '-V']).returncode != 0:
logging.warning('auditwheel is not installed or is not supported on the given platform. '
'Skipping wheel repairs.')
return
repaired_wheels_dir = os.path.join(wheels_dir, REPAIRED_WHEELS_REL_PATH)
for whl in wheels_to_check:
logging.info('Checking %s', whl)
whl_path = os.path.join(wheels_dir, whl.file_name)
if subprocess.run(['auditwheel', 'show', whl_path]).returncode != 0:
logging.info('Skipping non-platform wheel %s', whl)
else:
repair_result = subprocess.run(['auditwheel', 'repair', whl_path,
'--plat', PLATFORM, '-w', repaired_wheels_dir], capture_output=True)
if repair_result.returncode != 0:
logging.warning('Failed to repair platform wheel %s', whl)
continue
# original wheel will be replaced by repaired wheels written to repaired-wheels/
os.remove(whl_path)
all_wheels.remove(whl)
if os.path.exists(repaired_wheels_dir):
for whl in os.listdir(repaired_wheels_dir):
shutil.copyfile(os.path.join(repaired_wheels_dir, whl), os.path.join(wheels_dir, whl))
match = WHEEL_PATTERN.match(whl)
all_wheels.add(Wheel(
whl, match.group('distribution'), match.group('python_version'), match.group('platform')))
shutil.rmtree(repaired_wheels_dir)
def _remove_platform_wheels(all_built_wheels, new_wheels_dir, existing_app_json_wheel_entries):
"""
Removes all platform wheels in :param: all_built_wheels from :param: new_wheels_dir
If there's an existing wheel specified in the app json for a dependency that we just built
a platform wheel for, then we'll assume the existing wheel is compatible for Phantom and
return it to indicate that the wheel should not be deleted.
"""
existing_wheels = {w.module: w for w in existing_app_json_wheel_entries}
existing_wheels_entries_to_keep = []
for whl in list(all_built_wheels):
if whl.platform != 'any':
logging.info('Removing platform wheel %s', whl.file_name)
all_built_wheels.remove(whl)
os.remove(os.path.join(new_wheels_dir, whl.file_name))
# Check if the app already has a wheel packaged for the given dependency
# to avoid deleting it
if whl.distribution in existing_wheels:
existing_whl_path = existing_wheels[whl.distribution].input_file
logging.info('Existing wheel for %s to be retained: %s',
whl.distribution, existing_whl_path)
existing_wheels_entries_to_keep.append(
AppJsonWheelEntry(whl.distribution, existing_whl_path))
return existing_wheels_entries_to_keep
def _update_app_json(app_json, pip_dependencies_key, wheel_entries, app_dir):
"""
Updates the app's JSON config to specify that the wheels under the
repo's wheel/ folder be installed as dependencies.
https://docs.splunk.com/Documentation/Phantom/4.10.7/DevelopApps/Metadata#Specifying_pip_dependencies
"""
wheel_paths = [{
'module': w.module,
'input_file': w.input_file
} for w in sorted(wheel_entries, key=lambda w: w.module)]
app_json.content[pip_dependencies_key] = {'wheel': wheel_paths}
with open(os.path.join(app_dir, app_json.file_name), 'w') as out:
json.dump(app_json.content, out, indent=APP_JSON_INDENT)
out.write('\n')
def _parse_pip_dependency_wheels(app_json, pip_dependency_key):
pip_dependencies = app_json.content.get(pip_dependency_key, {'wheel': []})
return [AppJsonWheelEntry(w['module'], w['input_file'])
for w in pip_dependencies.get('wheel', [])]
def _copy_new_wheels(new_wheels, new_wheels_dir, app_dir, app_json, pip_dependencies_key):
"""
Copies new wheels to the wheels/ directory of the app dir.
"""
new_wheel_paths = []
def copy_wheel(wheel_name, dst_path):
src_fp = os.path.join(new_wheels_dir, wheel_name)
new_wheel_paths.append(os.path.join('wheels', dst_path))
logging.info('Writing %s --> %s', wheel_name, new_wheel_paths[-1])
shutil.copyfile(src_fp, os.path.join(app_dir, new_wheel_paths[-1]))
# Make sure to write the new wheels under appropriate wheels/(py2|py3|shared) sub paths
# when the app supports both pip_dependencies/pip3_dependencies
other_key = PIP_DEPENDENCIES if pip_dependencies_key == PIP3_DEPENDENCIES else PIP3_DEPENDENCIES
if other_key in app_json.content:
for path in (PY2_WHEELS_DIR, PY3_WHEELS_DIR, SHARED_WHEELS_DIR):
pathlib.Path(os.path.join(app_dir, 'wheels', path)).mkdir(parents=True, exist_ok=True)
for whl in new_wheels:
if whl.python_version == PY2_PY3_TAG:
sub_path = os.path.join(SHARED_WHEELS_DIR, whl.file_name)
elif whl.python_version == PY2_TAG or CP2_TAG_PATTEN.match(whl.python_version):
sub_path = os.path.join(PY2_WHEELS_DIR, whl.file_name)
elif whl.python_version == PY3_TAG or CP3_TAG_PATTERN.match(whl.python_version):
sub_path = os.path.join(PY3_WHEELS_DIR, whl.file_name)
else:
raise ValueError('{} has an unexpected python version tag: {}'.format(
whl.file_name, whl.python_version))
copy_wheel(whl.file_name, sub_path)
else:
for whl in new_wheels:
copy_wheel(whl.file_name, whl.file_name)
return new_wheel_paths
def _remove_unreferenced_wheel_paths(app_dir, existing_wheel_paths, new_wheel_paths, wheel_entries_for_other_py_version):
"""
Removes wheels from the app directory that will no longer be referenced by in app JSON.
"""
all_referenced_wheel_paths = set(new_wheel_paths + [w.input_file for w in wheel_entries_for_other_py_version])
for path in existing_wheel_paths:
if path not in all_referenced_wheel_paths:
logging.info('Removing unreferenced wheel under path %s', path)
path = os.path.join(app_dir, path)
if not os.path.exists(path):
logging.warning('%s does not exist!', path)
continue
os.remove(os.path.join(app_dir, path))
def main(args):
"""
Main entrypoint.
"""
app_dir, pip_path, repair_wheels, pip_dependencies_key = \
args.app_dir, args.pip_path, args.repair_wheels, args.pip_dependencies_key
wheels_dir, requirements_file = '{}/wheels'.format(app_dir), '{}/requirements.txt'.format(app_dir)
pathlib.Path(wheels_dir).mkdir(exist_ok=True)
logging.info('Building wheels for %s from %s into %s',
pip_dependencies_key, requirements_file, wheels_dir)
temp_dir = os.path.join(app_dir, ''.join(random.choices(string.digits, k=10)))
os.mkdir(temp_dir)
try:
build_result = subprocess.run([pip_path, 'wheel',
'-f', wheels_dir,
'-w', temp_dir,
'-r', requirements_file], capture_output=True)
if build_result.stdout:
logging.info(build_result.stdout.decode())
if build_result.stderr:
logging.warning(build_result.stderr.decode())
if build_result.returncode != 0:
logging.error('Failed to build wheels from requirements.txt. '
'This typically occurs when you have a version conflict in requirements.txt or '
'you depend on a library requiring external development libraries (eg, python-ldap). '
'In the former case, please resolve any version conflicts before re-running this script. '
'In the latter case, please manually build the library in a manylinux https://github.com/pypa/manylinux '
'container, making sure to first install any required development libraries. If you are unable '
'to build a required dependency for your app, please raise an issue in the app repo for further assistance.')
return
# Some apps may have different dependencies for Python2 and Python3, and
# we don't want to override the wheels for the Python version we aren't building for
app_json = _load_app_json(app_dir)
if pip_dependencies_key == PIP3_DEPENDENCIES:
existing_app_json_wheel_entries = _parse_pip_dependency_wheels(app_json, PIP3_DEPENDENCIES)
else: # pip_dependencies_key == 'pip_dependencies
existing_app_json_wheel_entries = _parse_pip_dependency_wheels(app_json, PIP_DEPENDENCIES)
existing_wheel_paths = set(w.input_file for w in existing_app_json_wheel_entries)
wheel_file_names = set(os.listdir(temp_dir))
all_built_wheels = set(Wheel(m.group(), m.group('distribution'), m.group('python_version'), m.group('platform'))
for m in (WHEEL_PATTERN.match(f) for f in wheel_file_names))
updated_app_json_wheel_entries = []
if repair_wheels:
logging.info('Repairing new platform wheels...')
wheels_to_repair, existing_wheel_file_names = [], set(os.path.basename(p) for p in existing_wheel_paths)
for wheel in all_built_wheels:
if wheel.file_name not in existing_wheel_file_names:
wheels_to_repair.append(wheel)
_repair_wheels(wheels_to_repair, all_built_wheels, temp_dir)
else:
logging.warning('New platform wheels will not be repaired but removed.')
# Remove any platform wheels for dependencies that we just built, but check for any
# existing wheels for these given dependencies - we won't replace them
existing_platform_wheel_entries = _remove_platform_wheels(
all_built_wheels, temp_dir, existing_app_json_wheel_entries)
# Ensure the entries in the app JSON for the existing wheels don't get overwritten
updated_app_json_wheel_entries.extend(existing_platform_wheel_entries)
existing_wheel_paths -= set(w.input_file for w in existing_platform_wheel_entries)
# Add the newly built wheels and remove the wheels no longer needed from the wheels folder
new_wheel_paths = _copy_new_wheels(all_built_wheels, temp_dir, app_dir, app_json, pip_dependencies_key)
wheels_for_other_py_version = _parse_pip_dependency_wheels(app_json, PIP_DEPENDENCIES) \
if pip_dependencies_key == PIP3_DEPENDENCIES else _parse_pip_dependency_wheels(app_json, PIP3_DEPENDENCIES)
_remove_unreferenced_wheel_paths(app_dir=app_dir,
new_wheel_paths=new_wheel_paths,
existing_wheel_paths=existing_wheel_paths,
wheel_entries_for_other_py_version=wheels_for_other_py_version)
logging.info('Updating app json with latest dependencies...')
for pair in zip(all_built_wheels, new_wheel_paths):
updated_app_json_wheel_entries.append(
AppJsonWheelEntry(pair[0].distribution, pair[1]))
_update_app_json(app_json, pip_dependencies_key, updated_app_json_wheel_entries, app_dir)
except:
logging.exception('Unexpected error')
finally:
shutil.rmtree(temp_dir)
def parse_args():
help_str = ' '.join(line.strip() for line in __doc__.strip().splitlines())
parser = argparse.ArgumentParser(description=help_str)
parser.add_argument('app_dir', help='Path to the target app directory'),
parser.add_argument('pip_path', help='Path to the pip installation to use')
parser.add_argument('pip_dependencies_key', choices=[PIP_DEPENDENCIES, PIP3_DEPENDENCIES],
help='Key in the app JSON specifying pip dependencies')
parser.add_argument('--repair_wheels', action='store_true',
help='Whether to repair platform wheels with auditwheel'),
return parser.parse_args()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
sys.exit(main(parse_args()))
| [
"logging.getLogger",
"re.compile",
"logging.exception",
"random.choices",
"logging.info",
"logging.error",
"os.remove",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"pathlib.Path",
"subprocess.run",
"os.mkdir",
"collections.namedtuple",
"logging.warning",
"os.path.join",
... | [((665, 837), 're.compile', 're.compile', (['"""^(?P<distribution>([A-Z0-9][A-Z0-9._-]*[A-Z0-9]))-([0-9]+\\\\.?)+-(?P<python_version>[A-Z0-9]+\\\\.?[A-Z0-9]+)-.+-(?P<platform>.+)\\\\.whl$"""', 're.IGNORECASE'], {}), "(\n '^(?P<distribution>([A-Z0-9][A-Z0-9._-]*[A-Z0-9]))-([0-9]+\\\\.?)+-(?P<python_version>[A-Z0-9]+\\\\.?[A-Z0-9]+)-.+-(?P<platform>.+)\\\\.whl$'\n , re.IGNORECASE)\n", (675, 837), False, 'import re\n'), ((860, 945), 'collections.namedtuple', 'namedtuple', (['"""Wheel"""', "['file_name', 'distribution', 'python_version', 'platform']"], {}), "('Wheel', ['file_name', 'distribution', 'python_version', 'platform']\n )\n", (870, 945), False, 'from collections import namedtuple\n'), ((961, 1013), 'collections.namedtuple', 'namedtuple', (['"""AppJsonWheel"""', "['module', 'input_file']"], {}), "('AppJsonWheel', ['module', 'input_file'])\n", (971, 1013), False, 'from collections import namedtuple\n'), ((1252, 1273), 're.compile', 're.compile', (['"""cp2\\\\d?"""'], {}), "('cp2\\\\d?')\n", (1262, 1273), False, 'import re\n'), ((1292, 1317), 're.compile', 're.compile', (['"""cp3\\\\d{0,2}"""'], {}), "('cp3\\\\d{0,2}')\n", (1302, 1317), False, 'import re\n'), ((1329, 1376), 'collections.namedtuple', 'namedtuple', (['"""AppJson"""', "['file_name', 'content']"], {}), "('AppJson', ['file_name', 'content'])\n", (1339, 1376), False, 'from collections import namedtuple\n'), ((2581, 2631), 'os.path.join', 'os.path.join', (['wheels_dir', 'REPAIRED_WHEELS_REL_PATH'], {}), '(wheels_dir, REPAIRED_WHEELS_REL_PATH)\n', (2593, 2631), False, 'import os\n'), ((3426, 3461), 'os.path.exists', 'os.path.exists', (['repaired_wheels_dir'], {}), '(repaired_wheels_dir)\n', (3440, 3461), False, 'import os\n'), ((9097, 9208), 'logging.info', 'logging.info', (['"""Building wheels for %s from %s into %s"""', 'pip_dependencies_key', 'requirements_file', 'wheels_dir'], {}), "('Building wheels for %s from %s into %s', pip_dependencies_key,\n requirements_file, wheels_dir)\n", (9109, 9208), False, 'import logging\n'), ((9310, 9328), 'os.mkdir', 'os.mkdir', (['temp_dir'], {}), '(temp_dir)\n', (9318, 9328), False, 'import os\n'), ((14031, 14076), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'help_str'}), '(description=help_str)\n', (14054, 14076), False, 'import argparse\n'), ((1699, 1723), 'logging.error', 'logging.error', (['error_msg'], {}), '(error_msg)\n', (1712, 1723), False, 'import logging\n'), ((2398, 2521), 'logging.warning', 'logging.warning', (['"""auditwheel is not installed or is not supported on the given platform. Skipping wheel repairs."""'], {}), "(\n 'auditwheel is not installed or is not supported on the given platform. Skipping wheel repairs.'\n )\n", (2413, 2521), False, 'import logging\n'), ((2673, 2705), 'logging.info', 'logging.info', (['"""Checking %s"""', 'whl'], {}), "('Checking %s', whl)\n", (2685, 2705), False, 'import logging\n'), ((2725, 2764), 'os.path.join', 'os.path.join', (['wheels_dir', 'whl.file_name'], {}), '(wheels_dir, whl.file_name)\n', (2737, 2764), False, 'import os\n'), ((3482, 3513), 'os.listdir', 'os.listdir', (['repaired_wheels_dir'], {}), '(repaired_wheels_dir)\n', (3492, 3513), False, 'import os\n'), ((3808, 3842), 'shutil.rmtree', 'shutil.rmtree', (['repaired_wheels_dir'], {}), '(repaired_wheels_dir)\n', (3821, 3842), False, 'import shutil\n'), ((5859, 5915), 'json.dump', 'json.dump', (['app_json.content', 'out'], {'indent': 'APP_JSON_INDENT'}), '(app_json.content, out, indent=APP_JSON_INDENT)\n', (5868, 5915), False, 'import json\n'), ((6458, 6498), 'os.path.join', 'os.path.join', (['new_wheels_dir', 'wheel_name'], {}), '(new_wheels_dir, wheel_name)\n', (6470, 6498), False, 'import os\n'), ((6572, 6638), 'logging.info', 'logging.info', (['"""Writing %s --> %s"""', 'wheel_name', 'new_wheel_paths[-1]'], {}), "('Writing %s --> %s', wheel_name, new_wheel_paths[-1])\n", (6584, 6638), False, 'import logging\n'), ((9362, 9481), 'subprocess.run', 'subprocess.run', (["[pip_path, 'wheel', '-f', wheels_dir, '-w', temp_dir, '-r', requirements_file]"], {'capture_output': '(True)'}), "([pip_path, 'wheel', '-f', wheels_dir, '-w', temp_dir, '-r',\n requirements_file], capture_output=True)\n", (9376, 9481), False, 'import subprocess\n'), ((13479, 13540), 'logging.info', 'logging.info', (['"""Updating app json with latest dependencies..."""'], {}), "('Updating app json with latest dependencies...')\n", (13491, 13540), False, 'import logging\n'), ((13895, 13918), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (13908, 13918), False, 'import shutil\n'), ((1457, 1476), 'os.listdir', 'os.listdir', (['app_dir'], {}), '(app_dir)\n', (1467, 1476), False, 'import os\n'), ((1775, 1811), 'os.path.join', 'os.path.join', (['app_dir', 'json_files[0]'], {}), '(app_dir, json_files[0])\n', (1787, 1811), False, 'import os\n'), ((1857, 1869), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1866, 1869), False, 'import json\n'), ((2336, 2372), 'subprocess.run', 'subprocess.run', (["['auditwheel', '-V']"], {}), "(['auditwheel', '-V'])\n", (2350, 2372), False, 'import subprocess\n'), ((2854, 2905), 'logging.info', 'logging.info', (['"""Skipping non-platform wheel %s"""', 'whl'], {}), "('Skipping non-platform wheel %s', whl)\n", (2866, 2905), False, 'import logging\n'), ((2948, 3070), 'subprocess.run', 'subprocess.run', (["['auditwheel', 'repair', whl_path, '--plat', PLATFORM, '-w',\n repaired_wheels_dir]"], {'capture_output': '(True)'}), "(['auditwheel', 'repair', whl_path, '--plat', PLATFORM, '-w',\n repaired_wheels_dir], capture_output=True)\n", (2962, 3070), False, 'import subprocess\n'), ((3363, 3382), 'os.remove', 'os.remove', (['whl_path'], {}), '(whl_path)\n', (3372, 3382), False, 'import os\n'), ((4501, 4558), 'logging.info', 'logging.info', (['"""Removing platform wheel %s"""', 'whl.file_name'], {}), "('Removing platform wheel %s', whl.file_name)\n", (4513, 4558), False, 'import logging\n'), ((5795, 5836), 'os.path.join', 'os.path.join', (['app_dir', 'app_json.file_name'], {}), '(app_dir, app_json.file_name)\n', (5807, 5836), False, 'import os\n'), ((6530, 6562), 'os.path.join', 'os.path.join', (['"""wheels"""', 'dst_path'], {}), "('wheels', dst_path)\n", (6542, 6562), False, 'import os\n'), ((6671, 6713), 'os.path.join', 'os.path.join', (['app_dir', 'new_wheel_paths[-1]'], {}), '(app_dir, new_wheel_paths[-1])\n', (6683, 6713), False, 'import os\n'), ((8450, 8513), 'logging.info', 'logging.info', (['"""Removing unreferenced wheel under path %s"""', 'path'], {}), "('Removing unreferenced wheel under path %s', path)\n", (8462, 8513), False, 'import logging\n'), ((8533, 8560), 'os.path.join', 'os.path.join', (['app_dir', 'path'], {}), '(app_dir, path)\n', (8545, 8560), False, 'import os\n'), ((9047, 9071), 'pathlib.Path', 'pathlib.Path', (['wheels_dir'], {}), '(wheels_dir)\n', (9059, 9071), False, 'import pathlib\n'), ((9268, 9303), 'random.choices', 'random.choices', (['string.digits'], {'k': '(10)'}), '(string.digits, k=10)\n', (9282, 9303), False, 'import random\n'), ((9826, 10452), 'logging.error', 'logging.error', (['"""Failed to build wheels from requirements.txt. This typically occurs when you have a version conflict in requirements.txt or you depend on a library requiring external development libraries (eg, python-ldap). In the former case, please resolve any version conflicts before re-running this script. In the latter case, please manually build the library in a manylinux https://github.com/pypa/manylinux container, making sure to first install any required development libraries. If you are unable to build a required dependency for your app, please raise an issue in the app repo for further assistance."""'], {}), "(\n 'Failed to build wheels from requirements.txt. This typically occurs when you have a version conflict in requirements.txt or you depend on a library requiring external development libraries (eg, python-ldap). In the former case, please resolve any version conflicts before re-running this script. In the latter case, please manually build the library in a manylinux https://github.com/pypa/manylinux container, making sure to first install any required development libraries. If you are unable to build a required dependency for your app, please raise an issue in the app repo for further assistance.'\n )\n", (9839, 10452), False, 'import logging\n'), ((11297, 11317), 'os.listdir', 'os.listdir', (['temp_dir'], {}), '(temp_dir)\n', (11307, 11317), False, 'import os\n'), ((11615, 11663), 'logging.info', 'logging.info', (['"""Repairing new platform wheels..."""'], {}), "('Repairing new platform wheels...')\n", (11627, 11663), False, 'import logging\n'), ((12044, 12116), 'logging.warning', 'logging.warning', (['"""New platform wheels will not be repaired but removed."""'], {}), "('New platform wheels will not be repaired but removed.')\n", (12059, 12116), False, 'import logging\n'), ((13836, 13873), 'logging.exception', 'logging.exception', (['"""Unexpected error"""'], {}), "('Unexpected error')\n", (13853, 13873), False, 'import logging\n'), ((14620, 14639), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (14637, 14639), False, 'import logging\n'), ((2776, 2824), 'subprocess.run', 'subprocess.run', (["['auditwheel', 'show', whl_path]"], {}), "(['auditwheel', 'show', whl_path])\n", (2790, 2824), False, 'import subprocess\n'), ((3173, 3231), 'logging.warning', 'logging.warning', (['"""Failed to repair platform wheel %s"""', 'whl'], {}), "('Failed to repair platform wheel %s', whl)\n", (3188, 3231), False, 'import logging\n'), ((3543, 3581), 'os.path.join', 'os.path.join', (['repaired_wheels_dir', 'whl'], {}), '(repaired_wheels_dir, whl)\n', (3555, 3581), False, 'import os\n'), ((3583, 3612), 'os.path.join', 'os.path.join', (['wheels_dir', 'whl'], {}), '(wheels_dir, whl)\n', (3595, 3612), False, 'import os\n'), ((4622, 4665), 'os.path.join', 'os.path.join', (['new_wheels_dir', 'whl.file_name'], {}), '(new_wheels_dir, whl.file_name)\n', (4634, 4665), False, 'import os\n'), ((4937, 5034), 'logging.info', 'logging.info', (['"""Existing wheel for %s to be retained: %s"""', 'whl.distribution', 'existing_whl_path'], {}), "('Existing wheel for %s to be retained: %s', whl.distribution,\n existing_whl_path)\n", (4949, 5034), False, 'import logging\n'), ((7296, 7342), 'os.path.join', 'os.path.join', (['SHARED_WHEELS_DIR', 'whl.file_name'], {}), '(SHARED_WHEELS_DIR, whl.file_name)\n', (7308, 7342), False, 'import os\n'), ((8580, 8600), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8594, 8600), False, 'import os\n'), ((8618, 8661), 'logging.warning', 'logging.warning', (['"""%s does not exist!"""', 'path'], {}), "('%s does not exist!', path)\n", (8633, 8661), False, 'import logging\n'), ((8709, 8736), 'os.path.join', 'os.path.join', (['app_dir', 'path'], {}), '(app_dir, path)\n', (8721, 8736), False, 'import os\n'), ((7462, 7505), 'os.path.join', 'os.path.join', (['PY2_WHEELS_DIR', 'whl.file_name'], {}), '(PY2_WHEELS_DIR, whl.file_name)\n', (7474, 7505), False, 'import os\n'), ((7113, 7150), 'os.path.join', 'os.path.join', (['app_dir', '"""wheels"""', 'path'], {}), "(app_dir, 'wheels', path)\n", (7125, 7150), False, 'import os\n'), ((7626, 7669), 'os.path.join', 'os.path.join', (['PY3_WHEELS_DIR', 'whl.file_name'], {}), '(PY3_WHEELS_DIR, whl.file_name)\n', (7638, 7669), False, 'import os\n'), ((11730, 11749), 'os.path.basename', 'os.path.basename', (['p'], {}), '(p)\n', (11746, 11749), False, 'import os\n')] |
#!/usr/bin/env python
'''
kmer-collapse.py
Starting with a set of kmers using a standard DNA alphabet (ACTG), collapse
it to an encoded, optimized set using IUPAC-derived, degenerate base alphabet.
'''
import sys
import json
import itertools as it
import marisa_trie as mt
"""
Generate all subsets of specified set. Empty set is fine.
"""
def powerset(seq):
if len(seq) <= 1:
yield seq
yield []
else:
for item in powerset(seq[1:]):
yield [seq[0]]+item
yield item
'''
We define a couple substitution objects to map between degenerate and
unambiguous bases; ref.: http://www.bioinformatics.org/sms/iupac.html
'''
def forward_substitutions():
'''
Return an object of tuple key to degenerate base value pairings.
Different tuples can point to the same degenerate base.
'''
raw_forward_substitutions = {
'AG' : 'R',
'CT' : 'Y',
'GC' : 'S',
'AT' : 'W',
'GT' : 'K',
'AC' : 'M',
'CGT' : 'B',
'AGT' : 'D',
'ACT' : 'H',
'ACG' : 'V',
'ACGT' : 'N',
}
permuted_forward_substitutions = {}
for key, value in raw_forward_substitutions.items():
new_keys = list(it.permutations(list(key)))
for new_key in new_keys:
permuted_forward_substitutions[new_key] = value
return permuted_forward_substitutions
def reverse_substitutions():
'''
Return an object of degenerate base key to unambiguous base value pairings.
'''
return {
'A' : ['A'],
'T' : ['T'],
'C' : ['C'],
'G' : ['G'],
'R' : ['A', 'G'],
'Y' : ['C', 'T'],
'S' : ['G', 'C'],
'W' : ['A', 'T'],
'K' : ['G', 'T'],
'M' : ['A', 'C'],
'B' : ['C', 'G', 'T'],
'D' : ['A', 'G', 'T'],
'H' : ['A', 'C', 'T'],
'V' : ['A', 'C', 'G'],
'N' : ['A', 'C', 'G', 'T'],
}
forward_subs = forward_substitutions()
reverse_subs = reverse_substitutions()
def expand_encoded_candidate(encoded_candidate):
'''
Given an encoded mer, expand it to all ordered combinations of unambiguous
bases. For example, 'WW' expands to ['AA', 'AT', 'TA', 'TT']
'''
n_candidate = len(encoded_candidate)
subs = []
for c in list([x for x in encoded_candidate]):
rrs = reverse_subs[c]
subs.append(rrs)
expanded_candidates = []
i = 1
while True:
sub = subs.pop(0)
if len(subs) == 0: break
new_sub = [f'{a}{b}' for a in sub for b in subs[0]]
i += 1
if i == n_candidate:
expanded_candidates = new_sub
else:
subs[0] = new_sub
expanded_candidates = list(set(expanded_candidates))
return expanded_candidates
def test_encoded_candidate(encoded_candidate):
'''
Validate encoded candidate, if all expanded candicates are valid prefixes
in global input trie.
'''
expanded_candidates = expand_encoded_candidate(encoded_candidate)
for ec in expanded_candidates:
if len(input_trie.keys(ec)) == 0:
return False
return True
def main():
'''
Set up input data with kmers to encode.
'''
# input = ['AAAAAAAAAA', 'TAAAAAAAAA']
# input = ['AAAAAAAAAA','TAAAAAAAAA','GCGAAAAAAA']
# input = ['AAAAAAAAAA']
# input = ['AAAAAAAAAA', 'TAAAAAAAAA', 'CAAAAAAAAA', 'GAAAAAAAAA']
# input = ['AAAAAAAAAA', 'TAAAAAAAAA', 'TTAAAAAAAA', 'ATAAAAAAAA']
# input = ['AAAAAAAAAA', 'TAAAAAAAAA', 'CAAAAAAAAA', 'GAAAAAAAAA', 'TACAGATACA', 'AACAGAAAAA']
input = ['AAAAAAAAAA', 'TAAAAAAAAA', 'ACAAAAAAAA', 'AGAAAAAAAA']
k = len(input[0])
global input_trie
input_trie = mt.Trie(input)
'''
At each column, encode that column's bases, taken from all kmers.
'''
per_sequence_encoded_candidates = [{x:[]} for x in input]
for col_idx in range(k):
bases_to_permute = list(set([x[col_idx] for x in input]))
bases_powerset = list(powerset(bases_to_permute))
per_column_encodings = {}
for bpse in bases_powerset:
tk = tuple(bpse)
if tk in forward_subs:
for tkb in tk:
if tkb not in per_column_encodings:
per_column_encodings[tkb] = []
if forward_subs[tk] not in per_column_encodings[tkb]:
per_column_encodings[tkb].append(forward_subs[tk])
for b in bases_to_permute:
if b not in per_column_encodings:
per_column_encodings[b] = []
per_column_encodings[b].append(b)
'''
Initialize candidates per sequence, or extend after testing.
'''
for psec in per_sequence_encoded_candidates:
psck = list(psec.keys())[0]
ek = psck[col_idx]
if col_idx == 0:
for ekv in per_column_encodings[ek]:
psec[psck].append('{}'.format(ekv))
else:
old_candidates = psec[psck]
new_candidates = []
for oc in old_candidates:
for pcec in per_column_encodings[psck[col_idx]]:
new_candidate = '{}{}'.format(oc, pcec)
'''
Expand the new candidate. Test it. If the new candidate
passes testing when expanded, add it to the new list.
Continually pruning the tree of bad candidates should
help limit the number of combinations to explore for
longer kmers.
'''
if test_encoded_candidate(new_candidate):
new_candidates.append(new_candidate)
psec[psck] = new_candidates
'''
Filter candidates by kmer "row" to resolve conflicts.
'''
kmer_available = {}
for psec in per_sequence_encoded_candidates:
for pseck in psec.keys():
kmer_available[pseck] = True
encodings = set()
for psec in per_sequence_encoded_candidates:
for pseck, psecv in psec.items():
encoded_candidates = psecv
for ec in encoded_candidates:
eecs = expand_encoded_candidate(ec)
'''
Prime the pump.
'''
if len(encodings) == 0:
for eec in eecs:
encodings.add(ec)
kmer_available[eec] = False
break
'''
If we pick an encoding that would generate a kmer
that had previously been generated, this would give
an incorrect result. So we walk through the expanded
set of unambiguous kmers and add an encoding if all
its expanded kmers are available for inclusion in the
result set.
'''
expansion_test_passed = True
for eec in eecs:
if not kmer_available[eec]:
expansion_test_passed = False
if expansion_test_passed:
for eec in eecs:
encodings.add(ec)
kmer_available[eec] = False
encodings = list(encodings)
'''
Write encoded output.
'''
result = {
"input" : input,
"encoded_output" : encodings
}
sys.stdout.write('{}\n'.format(json.dumps(result, indent=4)))
if __name__ == '__main__':
main()
| [
"json.dumps",
"marisa_trie.Trie"
] | [((3729, 3743), 'marisa_trie.Trie', 'mt.Trie', (['input'], {}), '(input)\n', (3736, 3743), True, 'import marisa_trie as mt\n'), ((7556, 7584), 'json.dumps', 'json.dumps', (['result'], {'indent': '(4)'}), '(result, indent=4)\n', (7566, 7584), False, 'import json\n')] |
import unittest
from conpype import Pipeline, InitTask
import base64
import os
import subprocess
from conpype.utils.docker_daemon import START_SCRIPT, STOP_SCRIPT
from conpype.concourse.__shared import set_concourse_context
from contextlib import contextmanager
@contextmanager
def concourse_ctx():
set_concourse_context(True)
try:
yield
finally:
set_concourse_context(False)
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
class TestPipeline(unittest.TestCase):
def test_init_task(self):
with Pipeline("test", script_dirs={"fake": "fake_scripts"}) as pipeline:
with pipeline.job("job") as job:
@job.task()
def task():
pass
self.assertEqual(len(pipeline.jobs), 1)
job = pipeline.jobs[0]
self.assertEqual(len(job.plan), 2)
init_task = job.plan[0]
self.assertTrue(isinstance(init_task, InitTask))
self.assertEqual(
init_task.init_dirs,
{
"starter": TEST_DIR,
"pythonpath/conpype": os.path.dirname(TEST_DIR),
"fake": os.path.join(TEST_DIR, "fake_scripts"),
},
)
data = init_task.package()
files = subprocess.check_output(["tar", "tjf", "-"], input=base64.b64decode(data)).decode("utf-8").split()
self.assertIn(f"starter/{os.path.basename(__file__)}", files)
self.assertIn(START_SCRIPT, files)
self.assertIn(STOP_SCRIPT, files)
self.assertIn("fake/test.sh", files)
self.assertEqual(pipeline.script_dir("fake"), os.path.join(TEST_DIR, "fake_scripts"))
with concourse_ctx():
self.assertEqual(pipeline.script_dir("fake"), os.path.abspath("scripts/fake"))
def test_job_groups(self):
with Pipeline("test") as pipeline:
with pipeline.job("job-a", groups=["a"]) as job:
@job.task()
def task_a():
pass
with pipeline.job("job-b", groups=["b"]) as job:
@job.task()
def task_b():
pass
with pipeline.job("job-ab", groups=["a", "b"]) as job:
@job.task()
def task_ab():
pass
concourse = pipeline.concourse()
self.assertIn("groups", concourse)
self.assertEqual(
[
{
"name": "a",
"jobs": ["job-a", "job-ab"],
},
{
"name": "b",
"jobs": ["job-b", "job-ab"],
},
],
concourse["groups"],
)
if __name__ == "__main__":
unittest.main()
# run > python -munittest in main conpype dir to execute
| [
"conpype.concourse.__shared.set_concourse_context",
"os.path.join",
"base64.b64decode",
"os.path.dirname",
"os.path.basename",
"unittest.main",
"os.path.abspath",
"conpype.Pipeline"
] | [((306, 333), 'conpype.concourse.__shared.set_concourse_context', 'set_concourse_context', (['(True)'], {}), '(True)\n', (327, 333), False, 'from conpype.concourse.__shared import set_concourse_context\n'), ((436, 461), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (451, 461), False, 'import os\n'), ((2907, 2922), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2920, 2922), False, 'import unittest\n'), ((378, 406), 'conpype.concourse.__shared.set_concourse_context', 'set_concourse_context', (['(False)'], {}), '(False)\n', (399, 406), False, 'from conpype.concourse.__shared import set_concourse_context\n'), ((547, 601), 'conpype.Pipeline', 'Pipeline', (['"""test"""'], {'script_dirs': "{'fake': 'fake_scripts'}"}), "('test', script_dirs={'fake': 'fake_scripts'})\n", (555, 601), False, 'from conpype import Pipeline, InitTask\n'), ((1919, 1935), 'conpype.Pipeline', 'Pipeline', (['"""test"""'], {}), "('test')\n", (1927, 1935), False, 'from conpype import Pipeline, InitTask\n'), ((1704, 1742), 'os.path.join', 'os.path.join', (['TEST_DIR', '"""fake_scripts"""'], {}), "(TEST_DIR, 'fake_scripts')\n", (1716, 1742), False, 'import os\n'), ((1143, 1168), 'os.path.dirname', 'os.path.dirname', (['TEST_DIR'], {}), '(TEST_DIR)\n', (1158, 1168), False, 'import os\n'), ((1198, 1236), 'os.path.join', 'os.path.join', (['TEST_DIR', '"""fake_scripts"""'], {}), "(TEST_DIR, 'fake_scripts')\n", (1210, 1236), False, 'import os\n'), ((1841, 1872), 'os.path.abspath', 'os.path.abspath', (['"""scripts/fake"""'], {}), "('scripts/fake')\n", (1856, 1872), False, 'import os\n'), ((1466, 1492), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1482, 1492), False, 'import os\n'), ((1381, 1403), 'base64.b64decode', 'base64.b64decode', (['data'], {}), '(data)\n', (1397, 1403), False, 'import base64\n')] |
import os
import cv2
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from flask_ngrok import run_with_ngrok
from flask import Flask,request,send_from_directory,render_template
# GLOBAL ACCESS
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
# SETUP APPLICATION
tf.disable_eager_execution()
app = Flask(__name__, static_url_path='')
run_with_ngrok(app)
def sparse_tensor_to_strs(sparse_tensor):
indices= sparse_tensor[0][0]
values = sparse_tensor[0][1]
dense_shape = sparse_tensor[0][2]
strs = [ [] for i in range(dense_shape[0]) ]
string = []
ptr = 0
b = 0
for idx in range(len(indices)):
if indices[idx][0] != b:
strs[b] = string
string = []
b = indices[idx][0]
string.append(values[ptr])
ptr = ptr + 1
strs[b] = string
return strs
def normalize(image):
return (255. - image)/255.
def resize(image, height):
width = int(float(height * image.shape[1]) / image.shape[0])
sample_img = cv2.resize(image, (width, height))
return sample_img
voc_file = "vocabulary_semantic.txt"
model = "semantic_model/semantic_model.meta"
tf.reset_default_graph()
sess = tf.InteractiveSession()
# Read the dictionary
dict_file = open(voc_file,'r')
dict_list = dict_file.read().splitlines()
int2word = dict()
for word in dict_list:
word_idx = len(int2word)
int2word[word_idx] = word
dict_file.close()
# Restore weights
saver = tf.train.import_meta_graph(model)
saver.restore(sess,model[:-5])
graph = tf.get_default_graph()
input = graph.get_tensor_by_name("model_input:0")
seq_len = graph.get_tensor_by_name("seq_lengths:0")
rnn_keep_prob = graph.get_tensor_by_name("keep_prob:0")
height_tensor = graph.get_tensor_by_name("input_height:0")
width_reduction_tensor = graph.get_tensor_by_name("width_reduction:0")
logits = tf.get_collection("logits")[0]
# Constants that are saved inside the model itself
WIDTH_REDUCTION, HEIGHT = sess.run([width_reduction_tensor, height_tensor])
decoded, _ = tf.nn.ctc_greedy_decoder(logits, seq_len)
# HOME
@app.route("/")
def root():
return render_template('index.html')
# IMAGE REQUEST
@app.route('/img/<filename>')
def send_img(filename):
return send_from_directory('', filename)
# ANDROID REQUEST
@app.route('/android/predict', methods = ['GET', 'POST'])
def login():
return 'Yeah, it works.'
# GET
@app.route('/users/<var>')
def hello_user(var):
"""
this serves as a demo purpose
:param user:
:return: str
"""
return "Wow, the GET works %s!" % var
# POST
@app.route('/api/post_some_data', methods=['POST'])
def get_text_prediction():
"""
predicts requested text whether it is ham or spam
:return: json
"""
json = request.get_json()
print(json)
if len(json['text']) == 0:
return jsonify({'error': 'invalid input'})
return jsonify({'This is the KEY': json['This is the value?']})
#UPLOAD_FOLDER = 'static/upload'
#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
#@app.route('/upload', methods=['GET','POST'])
#def upload():
# if flask.request.method == "POST":
# files = flask.request.files.getlist("file")
# for file in files:
# file.save(os.path.join(app.config['UPLOAD_FOLDER'], file.filename))
# MODEL PREDICTION
@app.route('/predict', methods = ['GET', 'POST'])
def predict():
if request.method == 'POST':
f = request.files['file']
img = f
image = Image.open(img).convert('L')
image = np.array(image)
image = resize(image, HEIGHT)
image = normalize(image)
image = np.asarray(image).reshape(1,image.shape[0],image.shape[1],1)
seq_lengths = [ image.shape[2] / WIDTH_REDUCTION ]
prediction = sess.run(decoded,
feed_dict={
input: image,
seq_len: seq_lengths,
rnn_keep_prob: 1.0,
})
str_predictions = sparse_tensor_to_strs(prediction)
array_of_notes = []
for w in str_predictions[0]:
array_of_notes.append(int2word[w])
notes=[]
for i in array_of_notes:
if i[0:5]=="note-":
if not i[6].isdigit():
notes.append(i[5:7])
else:
notes.append(i[5])
img = Image.open(img).convert('L')
size = (img.size[0], int(img.size[1]*1.5))
layer = Image.new('RGB', size, (255,255,255))
layer.paste(img, box=None)
img_arr = np.array(layer)
height = int(img_arr.shape[0])
width = int(img_arr.shape[1])
print(img_arr.shape[0])
draw = ImageDraw.Draw(layer)
# font = ImageFont.truetype(<font-file>, <font-size>)
font = ImageFont.truetype("Aaargh.ttf", 16)
# draw.text((x, y),"Sample Text",(r,g,b))
j = width / 9
for i in notes:
draw.text((j, height-40), i, (0,0,0), font=font)
j+= (width / (len(notes) + 4))
layer.save("img/annotated.png")
return render_template('result.html')
if __name__=="__main__":
app.run()
| [
"flask.render_template",
"flask.Flask",
"PIL.Image.new",
"numpy.array",
"PIL.ImageDraw.Draw",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.get_default_graph",
"flask.send_from_directory",
"tensorflow.compat.v1.nn.ctc_greedy_decoder",
"numpy.asarray",
"PIL.ImageFont.truetype",
"... | [((414, 442), 'tensorflow.compat.v1.disable_eager_execution', 'tf.disable_eager_execution', ([], {}), '()\n', (440, 442), True, 'import tensorflow.compat.v1 as tf\n'), ((449, 484), 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '""""""'}), "(__name__, static_url_path='')\n", (454, 484), False, 'from flask import Flask, request, send_from_directory, render_template\n'), ((485, 504), 'flask_ngrok.run_with_ngrok', 'run_with_ngrok', (['app'], {}), '(app)\n', (499, 504), False, 'from flask_ngrok import run_with_ngrok\n'), ((1291, 1315), 'tensorflow.compat.v1.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1313, 1315), True, 'import tensorflow.compat.v1 as tf\n'), ((1323, 1346), 'tensorflow.compat.v1.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (1344, 1346), True, 'import tensorflow.compat.v1 as tf\n'), ((1587, 1620), 'tensorflow.compat.v1.train.import_meta_graph', 'tf.train.import_meta_graph', (['model'], {}), '(model)\n', (1613, 1620), True, 'import tensorflow.compat.v1 as tf\n'), ((1660, 1682), 'tensorflow.compat.v1.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1680, 1682), True, 'import tensorflow.compat.v1 as tf\n'), ((2153, 2194), 'tensorflow.compat.v1.nn.ctc_greedy_decoder', 'tf.nn.ctc_greedy_decoder', (['logits', 'seq_len'], {}), '(logits, seq_len)\n', (2177, 2194), True, 'import tensorflow.compat.v1 as tf\n'), ((367, 392), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (382, 392), False, 'import os\n'), ((1151, 1185), 'cv2.resize', 'cv2.resize', (['image', '(width, height)'], {}), '(image, (width, height))\n', (1161, 1185), False, 'import cv2\n'), ((1980, 2007), 'tensorflow.compat.v1.get_collection', 'tf.get_collection', (['"""logits"""'], {}), "('logits')\n", (1997, 2007), True, 'import tensorflow.compat.v1 as tf\n'), ((2241, 2270), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (2256, 2270), False, 'from flask import Flask, request, send_from_directory, render_template\n'), ((2352, 2385), 'flask.send_from_directory', 'send_from_directory', (['""""""', 'filename'], {}), "('', filename)\n", (2371, 2385), False, 'from flask import Flask, request, send_from_directory, render_template\n'), ((2870, 2888), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2886, 2888), False, 'from flask import Flask, request, send_from_directory, render_template\n'), ((3629, 3644), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3637, 3644), True, 'import numpy as np\n'), ((4610, 4649), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'size', '(255, 255, 255)'], {}), "('RGB', size, (255, 255, 255))\n", (4619, 4649), False, 'from PIL import Image\n'), ((4701, 4716), 'numpy.array', 'np.array', (['layer'], {}), '(layer)\n', (4709, 4716), True, 'import numpy as np\n'), ((4841, 4862), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['layer'], {}), '(layer)\n', (4855, 4862), False, 'from PIL import ImageDraw\n'), ((4940, 4976), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""Aaargh.ttf"""', '(16)'], {}), "('Aaargh.ttf', 16)\n", (4958, 4976), False, 'from PIL import ImageFont\n'), ((5232, 5262), 'flask.render_template', 'render_template', (['"""result.html"""'], {}), "('result.html')\n", (5247, 5262), False, 'from flask import Flask, request, send_from_directory, render_template\n'), ((3584, 3599), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (3594, 3599), False, 'from PIL import Image\n'), ((3732, 3749), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (3742, 3749), True, 'import numpy as np\n'), ((4514, 4529), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (4524, 4529), False, 'from PIL import Image\n')] |
#!/usr/bin/python3
"""
Library for Raspberry Pi interfacing with the 16-Bit I/O Expander MCP23017
from Microchip Technology.
"""
__all__ = ["MCP23017", "LCD20x4"]
import time
import smbus
from collections import Iterable
class MCP23017:
"""
Class for the MCP23017 I/O port expander.
"""
REG_BASE_ADDR = {'IODIR': 0x00, 'IPOL': 0x02, 'GPINTEN': 0x04, 'DEFVAL': 0x06, 'INTCON': 0x08, 'IOCON': 0x0A,
'GPPU': 0x0C, 'INTF': 0x0E, 'INTCAP': 0x10, 'GPIO':0x12, 'OLAT': 0x14}
OUT = 0
IN = 1
LOW = 0
HIGH = 1
PUD_DOWN = 0
PUD_UP = 1
class RegisterValueError(ValueError):
def __init__(self, value=''):
self._message = value
@property
def message(self):
return self._message
@message.setter
def message(self, value):
self._message = value
def __str__(self):
return self.message
def __init__(self, bus, address):
self._bus = smbus.SMBus(bus)
self.address = address
self._register_values = dict()
# Leave ICON.BANK = 0 but set ICON.SEQOP = 1
self._bus.write_byte_data(self.address, MCP23017.REG_BASE_ADDR['IOCON'], 1 << 5)
# Read all register values
for key in MCP23017.REG_BASE_ADDR.keys():
self._register_values[key] = self._bus.read_word_data(self.address, MCP23017.REG_BASE_ADDR[key])
def close(self):
self._bus.close()
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
def __str__(self):
return '<Device class {0} with address {1}>'.format(self.__class__, self.address)
def setmode(self, *pargs, **kwargs):
"""
Method for compatibility with RPi.GPIO interface only.
"""
pass
def setwarnings(self, value):
"""
Method for compatibility with RPi.GPIO interface only.
"""
pass
@staticmethod
def set_bit_in_word(word, bit, value):
"""
Static helper method to set a bit in a word.
"""
if 0 <= bit <= 15 and value in (0, 1):
if value:
word |= (1 << bit)
else:
word &= ~(1 << bit)
return word
else:
raise MCP23017.RegisterValueError
def _write_register_word(self, key, bits, values):
"""
Helper method to manipulate the instance register values.
Works for both for a single bit as well as for a list of bits.
"""
word = self._register_values[key]
if not isinstance(bits, Iterable):
word = MCP23017.set_bit_in_word(word, bits, values)
else:
number_of_bits = len(bits)
if not isinstance(values, Iterable):
# if value is a single value we have to create a list a corresponding value list.
values = [values] * number_of_bits
else:
if len(values) != number_of_bits:
raise MCP23017.RegisterValueError
for bit, value in zip(bits, values):
word = MCP23017.set_bit_in_word(word, bit, value)
# check if we have a the word differs to the previous one
high_byte_diff, low_byte_diff = divmod(word ^ self._register_values[key], 256)
if high_byte_diff and low_byte_diff:
# both bytes changed so send the complete word
self._bus.write_word_data(self.address, MCP23017.REG_BASE_ADDR[key], word)
elif high_byte_diff ^ low_byte_diff:
# only one byte differs so we have to send only this one
value_msb, value_lsb = divmod(word, 256)
offset, value = (0, value_lsb) if low_byte_diff else (1, value_msb)
self._bus.write_byte_data(self.address, MCP23017.REG_BASE_ADDR[key] + offset, value)
else:
# no change so we do not need to set the hardware register value
pass
self._register_values[key] = word
def setup(self, pins, directions, initial=None, pull_up_down=None):
"""
Configures a pin or a list of pins either as output or as input.
"""
try:
self._write_register_word('IODIR', pins, directions)
if pull_up_down is not None:
raise NotImplementedError
if initial is not None:
raise NotImplementedError
except MCP23017.RegisterValueError as e:
e.message = 'Invalid pin(s) or direction value(s)...'
raise
def output(self, pins, states):
"""
Sets the logic output level of the pin or a list of pins.
Valid values for the output state are MCP23017.LOW / 0 / False or MCP23017.HIGH / 1 / True.
"""
try:
self._write_register_word('GPIO', pins, states)
except MCP23017.RegisterValueError as e:
e.message = 'Invalid pin(s) or state value(s)...'
raise
def input(self, pin):
"""
Reads the logic level of the pin.
"""
self._register_values['GPIO'] = self._bus.read_word_data(self.address, MCP23017.REG_BASE_ADDR['GPIO'])
return MCP23017.HIGH if ((self._register_values['GPIO'] >> pin)) & 1 else MCP23017.LOW
def gpio_function(self, pin):
"""
Returns whether the pin is configured as input or as output.
"""
self._register_values['IODIR'] = self._bus.read_word_data(self.address, MCP23017.REG_BASE_ADDR['IODIR'])
return MCP23017.IN if ((self._register_values['IODIR'] >> pin)) & 1 else MCP23017.OUT
def cleanup(self):
pass
class LCD20x4:
"""
Simple class for a character lcd connected via the MCP23017 I/O Expander.
"""
LINE_OFFSETS = [0x00, 0x40, 0x14, 0x54]
LINE_WIDTH = 20
LINE_NUMBER = 4
CMD_CLEAR_DISPLAY = 0x01
CMD_RETURN_HOME = 0x02
CMD_SET_ENTRY_MODE = 0x04
ENTRY_LEFT = 0x02
CMD_DISPLAY_CONTROL = 0x08
CURSOR_ON = 0x02
CURSOR_OFF = 0x00
DISPLAY_ON = 0x04
DISPLAY_OFF = 0x00
# Flags for the function set instruction
CMD_FUNCTION_SET = 0x20
EIGHT_BIT_MODE = 0x10
FOUR_BIT_MODE = 0x00
ONE_LINE = 0x00
TWO_LINES = 0x08
CMD_SET_DDRAM_ADDRESS = 0x80
def __init__(self, interface, rs, en, data, rw=None):
self._pins = {}
self._interface = interface
self._pins['RS'] = rs
self._pins['E'] = en
self._bit_mode = len(data)
if self._bit_mode not in (4, 8):
raise ValueError('Invalid number of data pins...')
self._pins['DATA']= data
self._pins['RW'] = rw
# Setup all pins as outputs
for pin in (rs, en, *data):
self._interface.setup(pin, MCP23017.OUT)
self._interface.output(pin, MCP23017.LOW)
if rw is not None:
self._interface.setup(rw, MCP23017.OUT)
# for now we use the pin only for write actions!
self._interface.output(rw, MCP23017.LOW)
function_set = LCD20x4.CMD_FUNCTION_SET |\
(LCD20x4.FOUR_BIT_MODE if self._bit_mode == 4 else LCD20x4.EIGHT_BIT_MODE) |\
LCD20x4.TWO_LINES
entry_mode = LCD20x4.CMD_SET_ENTRY_MODE | LCD20x4.ENTRY_LEFT
if self._bit_mode == 4:
self._write(0x33, delay_ms = 4.1)
self._write(0x32, delay_ms = 0.1)
else:
for _ in range(3):
self._write(function_set, delay_ms = 4.1)
self._write(function_set)
self._write(entry_mode)
self.display_off()
self.clear_display()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
self._interface.cleanup()
def _write(self, data, char_mode=False, delay_ms=0):
"""
Writes the 8-bit value either in character or in command mode.
"""
self._interface.output([self._pins['RS'], self._pins['E']], [char_mode, MCP23017.LOW])
data = divmod(data, 16) if self._bit_mode == 4 else (data,)
for number in data:
output = []
i = 0
while i < self._bit_mode:
output.append(MCP23017.HIGH if ((number >> i) & 1) else MCP23017.LOW)
i += 1
self._interface.output(self._pins['DATA'], output)
self._interface.output(self._pins['E'], MCP23017.HIGH)
self._interface.output(self._pins['E'], MCP23017.LOW)
if delay_ms > 0: time.sleep(delay_ms / 1000.0)
def clear_display(self):
"""
Clears the character display.
"""
self._write(LCD20x4.CMD_CLEAR_DISPLAY)
self.set_cursor_position(0, 0)
def display_off(self):
self._write(LCD20x4.CMD_DISPLAY_CONTROL | LCD20x4.DISPLAY_OFF, delay_ms=0.037)
def display_on(self):
self._write(LCD20x4.CMD_DISPLAY_CONTROL | LCD20x4.DISPLAY_ON, delay_ms=0.037)
def set_cursor_position(self, line, column):
"""
Sets the cursor to the desired position defined by the line and column number.
"""
if 0 <= line < LCD20x4.LINE_NUMBER and 0 <= column < LCD20x4.LINE_WIDTH:
self._write((LCD20x4.CMD_SET_DDRAM_ADDRESS | (LCD20x4.LINE_OFFSETS[line]) + column), delay_ms=0.037)
else:
raise ValueError('Invalid line or column position')
def write_line(self, line, column, text):
"""
Writes a text to the specified line and starting at the specified column.
"""
self.set_cursor_position(line, column)
for char in text[:(LCD20x4.LINE_WIDTH - column)]:
self.write_char(char)
def clear_line(self, line, column=0):
"""
Clears the line beginning from column.
"""
if 0 <= line < LCD20x4.LINE_NUMBER and 0 <= column < LCD20x4.LINE_WIDTH:
self.write_line(line, column, ' ' * (LCD20x4.LINE_WIDTH - column))
def write_char(self, char):
"""
Writes a character to the current cursor position.
"""
self._write(ord(char), True)
| [
"smbus.SMBus",
"time.sleep"
] | [((998, 1014), 'smbus.SMBus', 'smbus.SMBus', (['bus'], {}), '(bus)\n', (1009, 1014), False, 'import smbus\n'), ((8627, 8656), 'time.sleep', 'time.sleep', (['(delay_ms / 1000.0)'], {}), '(delay_ms / 1000.0)\n', (8637, 8656), False, 'import time\n')] |
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
spark = SparkSession \
.builder \
.appName("StructuredNetworkWordCount") \
.getOrCreate()
# Create DataFrame representing the stream of input lines from connection to localhost:9999
# "ID","language","Date","source","len","likes","RTs","Hashtags","Usernames","Userid","name","Place","followers","friends"
userSchema = StructType().add("ID", "string").add("language", "string").add("source","string").add("len","string").add("likes","integer").add("RTs","integer").add("Hashtags","string").add("Usernames","string").add("Userid","string").add("name","string").add("Place","string").add("followers","string").add("friends","string")
lines = spark \
.readStream \
.format("socket") \
.option("host", "localhost") \
.option("port", 9009) \
.schema(userSchema) \
.csv("/stream/FIFA_modded_small_1.csv") \
.load()
# Split the lines into words
# explode() takes in an array (or a map) as an input and outputs the elements of the array (map) as separate rows.
words = lines.select(explode(split(lines.value, " ")).alias("word"))
# Generate running word count
wordCounts = words.groupBy("word").count()
# Start running the query that prints the running counts to the console
query = wordCounts \
.writeStream \
.outputMode("complete") \
.format("console") \
.start()
query.awaitTermination()
| [
"pyspark.sql.functions.split",
"pyspark.sql.SparkSession.builder.appName"
] | [((128, 186), 'pyspark.sql.SparkSession.builder.appName', 'SparkSession.builder.appName', (['"""StructuredNetworkWordCount"""'], {}), "('StructuredNetworkWordCount')\n", (156, 186), False, 'from pyspark.sql import SparkSession\n'), ((1142, 1165), 'pyspark.sql.functions.split', 'split', (['lines.value', '""" """'], {}), "(lines.value, ' ')\n", (1147, 1165), False, 'from pyspark.sql.functions import split\n')] |
import time,os,math,inspect,re,sys,random,argparse
from env import SenseEnv
from torch.autograd import Variable
import numpy as np
from itertools import count
from collections import namedtuple
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
writer = SummaryWriter()
SavedAction = namedtuple('SavedAction', ['action', 'value'])
class Policy(nn.Module):
def __init__(self,observation_space_n,action_space_n):
super(Policy, self).__init__()
self.affine1 = nn.Linear(observation_space_n, 256)
self.action1 = nn.Linear(256, 128)
self.value1 = nn.Linear(256, 128)
self.action_head = nn.Linear(128, action_space_n)
self.value_head = nn.Linear(128, 1)
self.saved_actions = []
self.rewards = []
self.init_weights()
def init_weights(self):
self.affine1.weight.data.uniform_(-0.1, 0.1)
self.action1.weight.data.uniform_(-0.1, 0.1)
self.value1.weight.data.uniform_(-0.1, 0.1)
def forward(self, x):
x = F.relu(self.affine1(x))
xa = F.relu(self.action1(x))
xv = F.relu(self.value1(x))
action_scores = self.action_head(xa)
state_values = self.value_head(xv)
return F.softmax(action_scores), state_values
class CNN(nn.Module):
def __init__(self,classification_n):
super(CNN, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=5, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(2))
#self.fc = nn.Linear(7*7*32, 2)
self.fc = nn.Linear(80000, classification_n)
def forward(self, x):
x = x.unsqueeze(1).float()
out = self.layer1(x)
out = self.layer2(out)
#print("size before",out.size())
out = out.view(out.size(0), -1)
#print("size after",out.size())
out = self.fc(out)
return out
parser = argparse.ArgumentParser(description='SenseNet actor-critic example')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G', help='discount factor (default: 0.99)')
parser.add_argument('--epsilon', type=float, default=0.6, metavar='G', help='epsilon value for random action (default: 0.6)')
parser.add_argument('--seed', type=int, default=42, metavar='N', help='random seed (default: 42)')
parser.add_argument('--batch_size', type=int, default=42, metavar='N', help='batch size (default: 42)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='interval between training status logs (default: 10)')
parser.add_argument('--render', action='store_true', help='render the environment')
parser.add_argument('--debug', action='store_true', help='turn on debug mode')
parser.add_argument('--gpu', action='store_true', help='use GPU')
parser.add_argument('--log', type=str, help='log experiment to tensorboard')
parser.add_argument('--model_path', type=str, help='path to store/retrieve model at')
parser.add_argument('--mode', type=str, default="train", help='train/test/all model')
args = parser.parse_args()
def select_action(state,n_actions,epsilon=0.6):
if np.random.rand() < epsilon:
return np.random.choice(n_actions)
else:
state = torch.from_numpy(state).float().unsqueeze(0)
probs, state_value = model(Variable(state))
action = probs.multinomial()
model.saved_actions.append(SavedAction(action, state_value))
return action.data[0][0]
def finish_episode():
R = 0
saved_actions = model.saved_actions
value_loss = 0
rewards = []
for r in model.rewards[::-1]:
R = r + args.gamma * R
rewards.insert(0, R)
rewards = torch.Tensor(rewards)
rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
for (action, value), r in zip(saved_actions, rewards):
reward = r - value.data[0,0]
action.reinforce(reward)
value_loss += F.smooth_l1_loss(value, Variable(torch.Tensor([r])))
optimizer.zero_grad()
final_nodes = [value_loss] + list(map(lambda p: p.action, saved_actions))
gradients = [torch.ones(1)] + [None] * len(saved_actions)
autograd.backward(final_nodes, gradients)
optimizer.step()
del model.rewards[:]
del model.saved_actions[:]
#train
env = SenseEnv(vars(args))
print("action space: ",env.action_space())
model = Policy(env.observation_space(),env.action_space_n())
cnn = CNN(env.classification_n())
if args.gpu and torch.cuda.is_available():
model.cuda()
cnn.cuda()
if args.model_path:
if os.path.exists(args.model_path+"/model.pkl"):
print("loading pretrained models")
model.load_state_dict(torch.load(args.model_path+"/model.pkl"))
cnn.load_state_dict(torch.load(args.model_path+"/cnn.pkl"))
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
classifier_criterion = nn.CrossEntropyLoss()
classifier_optimizer = torch.optim.Adam(cnn.parameters(), lr=0.001)
running_reward = 0
batch = []
labels = []
total_steps = 0
if args.mode == "train" or args.mode == "all":
for i_episode in count(1000):
observation = env.reset()
print("episode: ", i_episode)
for t in range(1000):
action = select_action(observation,env.action_space_n(),args.epsilon)
observation, reward, done, info = env.step(action)
model.rewards.append(reward)
if env.is_touching():
print("touching!")
#print("batch size", len(batch))
if len(batch) > args.batch_size:
#TODO GPU support
#batch = torch.from_numpy(np.asarray(batch))
batch = torch.LongTensor(torch.from_numpy(np.asarray(batch)))
labels = torch.from_numpy(np.asarray(labels))
#labels = torch.LongTensor(torch.from_numpy(np.asarray(labels)))
if args.gpu and torch.cuda.is_available():
batch = batch.cuda()
labels = labels.cuda()
batch = Variable(batch)
labels = Variable(labels)
classifier_optimizer.zero_grad()
outputs = cnn(batch)
loss = classifier_criterion(outputs, labels)
loss.backward()
classifier_optimizer.step()
print ('Loss: %.4f' %(loss.data[0]))
if args.log:
writer.add_scalar(args.log + "/loss",loss.data[0],total_steps)
batch = []
labels = []
else:
batch.append(observation.reshape(200,200))
labels.append(env.class_label)
if done:
break
running_reward = running_reward * 0.99 + t * 0.01
print("running reward ", running_reward)
total_steps +=1
finish_episode()
if i_episode % args.log_interval == 0:
if args.log:
writer.add_scalar(args.log+"/reward",running_reward,total_steps)
print('Episode {}\tLast length: {:5d}\tAverage length: {:.2f}'.format(i_episode, t, running_reward))
if running_reward > 5000: #env.spec.reward_threshold:
print("Solved! Running reward is now {} and the last episode runs to {} time steps!".format(running_reward, t))
break
if args.model_path:
torch.save(model.state_dict(), os.path.join(args.model_path, 'policy.pkl' ))
torch.save(model.state_dict(), os.path.join(args.model_path, 'cnn.pkl' ))
elif args.mode == "test" or args.mode == "all":
#test
test_labels = []
predicted_labels = []
steps_to_guess = []
correct = 0
total = 0
max_steps = 500
for i_episode in range(100):
guesses = []
print("testing on a new object")
observation = env.reset()
for t in range(max_steps):
action = select_action(observation,env.action_space_n(),args.epsilon)
observation, reward, done, info = env.step(action)
model.rewards.append(reward)
#if confidence over 90%, then use it
if (t >= max_steps-1 and len(guesses) == 0) or env.is_touching:
x = [observation.reshape(200,200)]
x = torch.LongTensor(torch.from_numpy(np.asarray(x)))
x = Variable(x)
output = cnn(x)
prob, predicted = torch.max(output.data, 1)
correct += int(predicted[0][0] == env.class_label)
total += 1
print("predicted ", predicted[0][0], " with prob ", prob[0][0], " correct answer is: ",env.class_label)
print('Accuracy of the network: %d %%' % (100 * correct / total ))
else:
for i_episode in range(100):
observation = env.reset()
for t in range(1000):
env.render()
action = np.random.choice(env.action_space_n())
observation,reward,done,info = env.step(action)
print(observation)
| [
"torch.nn.ReLU",
"torch.nn.CrossEntropyLoss",
"numpy.random.rand",
"torch.max",
"torch.from_numpy",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"torch.nn.functional.softmax",
"os.path.exists",
"torch.nn.BatchNorm2d",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"numpy.asarray"... | [((408, 423), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (421, 423), False, 'from tensorboardX import SummaryWriter\n'), ((438, 484), 'collections.namedtuple', 'namedtuple', (['"""SavedAction"""', "['action', 'value']"], {}), "('SavedAction', ['action', 'value'])\n", (448, 484), False, 'from collections import namedtuple\n'), ((2076, 2144), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""SenseNet actor-critic example"""'}), "(description='SenseNet actor-critic example')\n", (2099, 2144), False, 'import time, os, math, inspect, re, sys, random, argparse\n'), ((4852, 4864), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4862, 4864), True, 'import torch.nn as nn\n'), ((4948, 4969), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4967, 4969), True, 'import torch.nn as nn\n'), ((3782, 3803), 'torch.Tensor', 'torch.Tensor', (['rewards'], {}), '(rewards)\n', (3794, 3803), False, 'import torch\n'), ((4240, 4281), 'torch.autograd.backward', 'autograd.backward', (['final_nodes', 'gradients'], {}), '(final_nodes, gradients)\n', (4257, 4281), True, 'import torch.autograd as autograd\n'), ((4542, 4567), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4565, 4567), False, 'import torch\n'), ((4622, 4668), 'os.path.exists', 'os.path.exists', (["(args.model_path + '/model.pkl')"], {}), "(args.model_path + '/model.pkl')\n", (4636, 4668), False, 'import time, os, math, inspect, re, sys, random, argparse\n'), ((5163, 5174), 'itertools.count', 'count', (['(1000)'], {}), '(1000)\n', (5168, 5174), False, 'from itertools import count\n'), ((621, 656), 'torch.nn.Linear', 'nn.Linear', (['observation_space_n', '(256)'], {}), '(observation_space_n, 256)\n', (630, 656), True, 'import torch.nn as nn\n'), ((676, 695), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (685, 695), True, 'import torch.nn as nn\n'), ((714, 733), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (723, 733), True, 'import torch.nn as nn\n'), ((757, 787), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'action_space_n'], {}), '(128, action_space_n)\n', (766, 787), True, 'import torch.nn as nn\n'), ((810, 827), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(1)'], {}), '(128, 1)\n', (819, 827), True, 'import torch.nn as nn\n'), ((1770, 1804), 'torch.nn.Linear', 'nn.Linear', (['(80000)', 'classification_n'], {}), '(80000, classification_n)\n', (1779, 1804), True, 'import torch.nn as nn\n'), ((3278, 3294), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3292, 3294), True, 'import numpy as np\n'), ((3317, 3344), 'numpy.random.choice', 'np.random.choice', (['n_actions'], {}), '(n_actions)\n', (3333, 3344), True, 'import numpy as np\n'), ((1288, 1312), 'torch.nn.functional.softmax', 'F.softmax', (['action_scores'], {}), '(action_scores)\n', (1297, 1312), True, 'import torch.nn.functional as F\n'), ((1460, 1502), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(16)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(1, 16, kernel_size=5, padding=2)\n', (1469, 1502), True, 'import torch.nn as nn\n'), ((1510, 1528), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (1524, 1528), True, 'import torch.nn as nn\n'), ((1536, 1545), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1543, 1545), True, 'import torch.nn as nn\n'), ((1553, 1568), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1565, 1568), True, 'import torch.nn as nn\n'), ((1609, 1652), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(16, 32, kernel_size=5, padding=2)\n', (1618, 1652), True, 'import torch.nn as nn\n'), ((1660, 1678), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (1674, 1678), True, 'import torch.nn as nn\n'), ((1686, 1695), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1693, 1695), True, 'import torch.nn as nn\n'), ((1703, 1718), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1715, 1718), True, 'import torch.nn as nn\n'), ((3441, 3456), 'torch.autograd.Variable', 'Variable', (['state'], {}), '(state)\n', (3449, 3456), False, 'from torch.autograd import Variable\n'), ((4193, 4206), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (4203, 4206), False, 'import torch\n'), ((4733, 4775), 'torch.load', 'torch.load', (["(args.model_path + '/model.pkl')"], {}), "(args.model_path + '/model.pkl')\n", (4743, 4775), False, 'import torch\n'), ((4799, 4839), 'torch.load', 'torch.load', (["(args.model_path + '/cnn.pkl')"], {}), "(args.model_path + '/cnn.pkl')\n", (4809, 4839), False, 'import torch\n'), ((3862, 3882), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (3870, 3882), True, 'import numpy as np\n'), ((4058, 4075), 'torch.Tensor', 'torch.Tensor', (['[r]'], {}), '([r])\n', (4070, 4075), False, 'import torch\n'), ((7205, 7248), 'os.path.join', 'os.path.join', (['args.model_path', '"""policy.pkl"""'], {}), "(args.model_path, 'policy.pkl')\n", (7217, 7248), False, 'import time, os, math, inspect, re, sys, random, argparse\n'), ((7288, 7328), 'os.path.join', 'os.path.join', (['args.model_path', '"""cnn.pkl"""'], {}), "(args.model_path, 'cnn.pkl')\n", (7300, 7328), False, 'import time, os, math, inspect, re, sys, random, argparse\n'), ((6003, 6018), 'torch.autograd.Variable', 'Variable', (['batch'], {}), '(batch)\n', (6011, 6018), False, 'from torch.autograd import Variable\n'), ((6038, 6054), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (6046, 6054), False, 'from torch.autograd import Variable\n'), ((8040, 8051), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (8048, 8051), False, 'from torch.autograd import Variable\n'), ((8102, 8127), 'torch.max', 'torch.max', (['output.data', '(1)'], {}), '(output.data, 1)\n', (8111, 8127), False, 'import torch\n'), ((3365, 3388), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (3381, 3388), False, 'import torch\n'), ((5769, 5787), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (5779, 5787), True, 'import numpy as np\n'), ((5890, 5915), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5913, 5915), False, 'import torch\n'), ((5713, 5730), 'numpy.asarray', 'np.asarray', (['batch'], {}), '(batch)\n', (5723, 5730), True, 'import numpy as np\n'), ((8012, 8025), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (8022, 8025), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 21 07:57:49 2021
@author: amts
"""
import networkx as nx
import random
from ant_colony import ant_colony
import math
size =16
#size = 4
G = nx.fast_gnp_random_graph(size, .3)
#G = nx.complete_graph(size)
#G = nx.binomial_tree(size)
for (u, v) in G.edges():
G.edges[u, v]['weight'] = random.randint(2, 5)
#M = nx.to_numpy_array(G)
#M = nx.adjacency_matrix(G)
#print(M)
pos = nx.spring_layout(G, weight='weight')
new_pos = {}
for key, arr in pos.items():
new_pos.update({key: tuple(arr)})
#with_labels=true is to show the node number in the output graph
nx.draw(G, pos=pos, with_labels = True, edge_color = 'black' ,
width = 1, alpha = 0.7)
labels = nx.get_edge_attributes(G,'weight')
#nx.draw_networkx_edge_labels(G,pos,edge_labels=labels)
# print(type(new_pos.keys()))
# given some nodes, and some locations...
test_nodes = new_pos
"""
# function to get distance between nodes
def distance(start, end):
x_distance = abs(start[0] - end[0])
y_distance = abs(start[1] - end[1])
# c = sqrt(a² + b²)
return math.sqrt(pow(x_distance, 2) + pow(y_distance, 2))
"""
# test function
def distance(start, end):
global G
return nx.shortest_path_length(G, source=start, target=end,
weight='weight' )
# erro division by zero
# make a colony of ants
colony = ant_colony(test_nodes, distance)
# that will find the optimal, solution with ACO
answer = colony.mainloop()
| [
"ant_colony.ant_colony",
"networkx.get_edge_attributes",
"networkx.spring_layout",
"networkx.shortest_path_length",
"networkx.fast_gnp_random_graph",
"random.randint",
"networkx.draw"
] | [((215, 250), 'networkx.fast_gnp_random_graph', 'nx.fast_gnp_random_graph', (['size', '(0.3)'], {}), '(size, 0.3)\n', (239, 250), True, 'import networkx as nx\n'), ((458, 494), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {'weight': '"""weight"""'}), "(G, weight='weight')\n", (474, 494), True, 'import networkx as nx\n'), ((644, 721), 'networkx.draw', 'nx.draw', (['G'], {'pos': 'pos', 'with_labels': '(True)', 'edge_color': '"""black"""', 'width': '(1)', 'alpha': '(0.7)'}), "(G, pos=pos, with_labels=True, edge_color='black', width=1, alpha=0.7)\n", (651, 721), True, 'import networkx as nx\n'), ((750, 785), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['G', '"""weight"""'], {}), "(G, 'weight')\n", (772, 785), True, 'import networkx as nx\n'), ((1423, 1455), 'ant_colony.ant_colony', 'ant_colony', (['test_nodes', 'distance'], {}), '(test_nodes, distance)\n', (1433, 1455), False, 'from ant_colony import ant_colony\n'), ((363, 383), 'random.randint', 'random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (377, 383), False, 'import random\n'), ((1254, 1323), 'networkx.shortest_path_length', 'nx.shortest_path_length', (['G'], {'source': 'start', 'target': 'end', 'weight': '"""weight"""'}), "(G, source=start, target=end, weight='weight')\n", (1277, 1323), True, 'import networkx as nx\n')] |
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corporation (authors: <NAME>)
import kaldifst
# see also https://www.openfst.org/twiki/bin/view/FST/DeterminizeDoc
def test_determinize():
isym = kaldifst.SymbolTable()
isym.add_symbol("<eps>", 0)
isym.add_symbol("a", 1)
isym.add_symbol("c", 2)
isym.add_symbol("d", 3)
osym = kaldifst.SymbolTable()
osym.add_symbol("<eps>", 0)
osym.add_symbol("p", 1)
osym.add_symbol("q", 2)
osym.add_symbol("r", 3)
osym.add_symbol("s", 4)
s = """
0 1 a p 1
0 2 a q 2
1 3 c r 5
1 3 c r 4
2 3 d s 6
3 0
"""
fst = kaldifst.compile(s, acceptor=False, isymbols=isym, osymbols=osym)
s1 = kaldifst.draw(
fst, acceptor=False, isymbols=isym, osymbols=osym, portrait=True
)
print(s1) # see https://git.io/JSPvH
det_fst = kaldifst.determinize(fst)
s2 = kaldifst.draw(
det_fst, acceptor=False, isymbols=isym, osymbols=osym, portrait=True
)
print(s2) # see https://git.io/JSPTF
def main():
test_determinize()
if __name__ == "__main__":
main()
| [
"kaldifst.compile",
"kaldifst.SymbolTable",
"kaldifst.determinize",
"kaldifst.draw"
] | [((208, 230), 'kaldifst.SymbolTable', 'kaldifst.SymbolTable', ([], {}), '()\n', (228, 230), False, 'import kaldifst\n'), ((359, 381), 'kaldifst.SymbolTable', 'kaldifst.SymbolTable', ([], {}), '()\n', (379, 381), False, 'import kaldifst\n'), ((660, 725), 'kaldifst.compile', 'kaldifst.compile', (['s'], {'acceptor': '(False)', 'isymbols': 'isym', 'osymbols': 'osym'}), '(s, acceptor=False, isymbols=isym, osymbols=osym)\n', (676, 725), False, 'import kaldifst\n'), ((735, 814), 'kaldifst.draw', 'kaldifst.draw', (['fst'], {'acceptor': '(False)', 'isymbols': 'isym', 'osymbols': 'osym', 'portrait': '(True)'}), '(fst, acceptor=False, isymbols=isym, osymbols=osym, portrait=True)\n', (748, 814), False, 'import kaldifst\n'), ((886, 911), 'kaldifst.determinize', 'kaldifst.determinize', (['fst'], {}), '(fst)\n', (906, 911), False, 'import kaldifst\n'), ((921, 1008), 'kaldifst.draw', 'kaldifst.draw', (['det_fst'], {'acceptor': '(False)', 'isymbols': 'isym', 'osymbols': 'osym', 'portrait': '(True)'}), '(det_fst, acceptor=False, isymbols=isym, osymbols=osym,\n portrait=True)\n', (934, 1008), False, 'import kaldifst\n')] |
import requests # this is to GET the javascript
import re # this is to do regular expression and extract the list of words
import streamlit as st #web app development
m = st.markdown("""
<style>
div.stButton > button:first-child {
background-color: #0000FF;
color:#ffffff;
}
div.stButton > button:hover {
background-color: #FF0000;
color:##ff99ff;
}
</style>""", unsafe_allow_html=True)
# extract the wordle solution list from the source page javascript
@st.cache
def extract_solution():
wordle_js = requests.get("https://www.powerlanguage.co.uk/wordle/main.c1506a22.js")
m = re.findall(r"var La=\[(.*?)\]", wordle_js.text, flags=re.S)
word_list = m[0].split(",")
return(word_list)
word_list = extract_solution()
st.title("👻 Wordle Spoiler 👿")
index = st.text_input(label = "Enter the Wordle Number for which you need solution")
st.error(" ⚠️ Do you really want to do this? I mean you can always play for fun!!!")
if st.button("Yes I just want to spoil the mood",):
st.balloons()
st.write(word_list[int(index)]) | [
"streamlit.markdown",
"streamlit.balloons",
"streamlit.button",
"requests.get",
"streamlit.error",
"re.findall",
"streamlit.text_input",
"streamlit.title"
] | [((175, 420), 'streamlit.markdown', 'st.markdown', (['"""\n<style>\ndiv.stButton > button:first-child {\n background-color: #0000FF;\n color:#ffffff;\n}\ndiv.stButton > button:hover {\n background-color: #FF0000;\n color:##ff99ff;\n }\n</style>"""'], {'unsafe_allow_html': '(True)'}), '(\n """\n<style>\ndiv.stButton > button:first-child {\n background-color: #0000FF;\n color:#ffffff;\n}\ndiv.stButton > button:hover {\n background-color: #FF0000;\n color:##ff99ff;\n }\n</style>"""\n , unsafe_allow_html=True)\n', (186, 420), True, 'import streamlit as st\n'), ((762, 792), 'streamlit.title', 'st.title', (['"""👻 Wordle Spoiler 👿"""'], {}), "('👻 Wordle Spoiler 👿')\n", (770, 792), True, 'import streamlit as st\n'), ((802, 876), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""Enter the Wordle Number for which you need solution"""'}), "(label='Enter the Wordle Number for which you need solution')\n", (815, 876), True, 'import streamlit as st\n'), ((880, 969), 'streamlit.error', 'st.error', (['""" ⚠️ Do you really want to do this? I mean you can always play for fun!!!"""'], {}), "(\n ' ⚠️ Do you really want to do this? I mean you can always play for fun!!!')\n", (888, 969), True, 'import streamlit as st\n'), ((969, 1015), 'streamlit.button', 'st.button', (['"""Yes I just want to spoil the mood"""'], {}), "('Yes I just want to spoil the mood')\n", (978, 1015), True, 'import streamlit as st\n'), ((532, 603), 'requests.get', 'requests.get', (['"""https://www.powerlanguage.co.uk/wordle/main.c1506a22.js"""'], {}), "('https://www.powerlanguage.co.uk/wordle/main.c1506a22.js')\n", (544, 603), False, 'import requests\n'), ((613, 673), 're.findall', 're.findall', (['"""var La=\\\\[(.*?)\\\\]"""', 'wordle_js.text'], {'flags': 're.S'}), "('var La=\\\\[(.*?)\\\\]', wordle_js.text, flags=re.S)\n", (623, 673), False, 'import re\n'), ((1022, 1035), 'streamlit.balloons', 'st.balloons', ([], {}), '()\n', (1033, 1035), True, 'import streamlit as st\n')] |
import pandas as pd
import re
import numpy as np
import os
import sys
from collections import OrderedDict, defaultdict
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats, integrate
# load msncodes
msncodes = pd.read_csv("data/csv/original/msncodes.csv")
# load state data
az = pd.read_csv("data/csv/state_data/az_data.csv", engine='c', low_memory=True)
ca = pd.read_csv("data/csv/state_data/ca_data.csv", engine='c', low_memory=True)
nm = pd.read_csv("data/csv/state_data/nm_data.csv", engine='c', low_memory=True)
tx = pd.read_csv("data/csv/state_data/tx_data.csv", engine='c', low_memory=True)
# select msncodes of renewable energy
msn = []
description = []
unit = []
for i in range(len(msncodes["MSN"])):
if re.search("[R|r]enewable", msncodes["Description"][i]):
msn.append(msncodes["MSN"][i])
description.append(msncodes["Description"][i])
unit.append(msncodes["Unit"][i])
renewable = OrderedDict()
renewable["MSN"] = msn
renewable["Description"] = description
renewable["Unit"] = unit
renewable_data = pd.DataFrame(renewable)
renewable_data.to_csv("data/csv/renewable/renewable.csv", index=False, index_label=False, sep=',')
# select data of renewable energy
# az
az_msn = []
az_year = []
az_data = []
for i in range(len(az["MSN"])):
if az["Year"][i] == 2009:
if az["MSN"][i] == "REPRB":
az_reprb = az["Data"][i]
az_msn.append(az["MSN"][i])
az_year.append(az["Year"][i])
az_data.append(az["Data"][i])
elif az["MSN"][i] == "RETCB":
az_retcb = az["Data"][i]
az_msn.append(az["MSN"][i])
az_year.append(az["Year"][i])
az_data.append(az["Data"][i])
elif az["MSN"][i] == "ROPRB":
az_roprb = az["Data"][i]
az_msn.append(az["MSN"][i])
az_year.append(az["Year"][i])
az_data.append(az["Data"][i])
elif az["MSN"][i] == "TPOPP":
az_tpopp=az["Data"][i]
az_msn.append(az["MSN"][i])
az_year.append(az["Year"][i])
az_data.append(az["Data"][i])
elif az["MSN"][i] == "TETCB":
az_tetcb = az["Data"][i]
az_msn.append(az["MSN"][i])
az_year.append(az["Year"][i])
az_data.append(az["Data"][i])
elif az["MSN"][i] == "TEPRB":
az_teprb = az["Data"][i]
az_msn.append(az["MSN"][i])
az_year.append(az["Year"][i])
az_data.append(az["Data"][i])
else:
pass
else:
pass
az_renewable = OrderedDict()
az_renewable["MSN"] = az_msn
az_renewable["Year"] = az_year
az_renewable["Data"] = az_data
az_renewable_data = pd.DataFrame(az_renewable)
az_renewable_data.to_csv("data/csv/renewable/az_renewable.csv", index=False, index_label=False, sep=',')
# ca
ca_msn = []
ca_year = []
ca_data = []
for i in range(len(ca["MSN"])):
if ca["Year"][i] == 2009:
if ca["MSN"][i] == "REPRB":
ca_reprb = ca["Data"][i]
ca_msn.append(ca["MSN"][i])
ca_year.append(ca["Year"][i])
ca_data.append(ca["Data"][i])
elif ca["MSN"][i] == "RETCB":
ca_retcb = ca["Data"][i]
ca_msn.append(ca["MSN"][i])
ca_year.append(ca["Year"][i])
ca_data.append(ca["Data"][i])
elif ca["MSN"][i] == "ROPRB":
ca_roprb = ca["Data"][i]
ca_msn.append(ca["MSN"][i])
ca_year.append(ca["Year"][i])
ca_data.append(ca["Data"][i])
elif ca["MSN"][i] == "TPOPP":
ca_tpopp = ca["Data"][i]
ca_msn.append(ca["MSN"][i])
ca_year.append(ca["Year"][i])
ca_data.append(ca["Data"][i])
elif ca["MSN"][i] == "TETCB":
ca_tetcb = ca["Data"][i]
ca_msn.append(ca["MSN"][i])
ca_year.append(ca["Year"][i])
ca_data.append(ca["Data"][i])
elif ca["MSN"][i] == "TEPRB":
ca_teprb = ca["Data"][i]
ca_msn.append(ca["MSN"][i])
ca_year.append(ca["Year"][i])
ca_data.append(ca["Data"][i])
else:
pass
else:
pass
ca_renewable = OrderedDict()
ca_renewable["MSN"] = ca_msn
ca_renewable["Year"] = ca_year
ca_renewable["Data"] = ca_data
ca_renewable_data = pd.DataFrame(ca_renewable)
ca_renewable_data.to_csv("data/csv/renewable/ca_renewable.csv", index=False, index_label=False, sep=',')
# nm
nm_msn = []
nm_year = []
nm_data = []
for i in range(len(nm["MSN"])):
if nm["Year"][i] == 2009:
if nm["MSN"][i] == "REPRB":
nm_reprb = nm["Data"][i]
nm_msn.append(nm["MSN"][i])
nm_year.append(nm["Year"][i])
nm_data.append(nm["Data"][i])
elif nm["MSN"][i] == "RETCB":
nm_retcb = nm["Data"][i]
nm_msn.append(nm["MSN"][i])
nm_year.append(nm["Year"][i])
nm_data.append(nm["Data"][i])
elif nm["MSN"][i] == "ROPRB":
nm_roprb = nm["Data"][i]
nm_msn.append(nm["MSN"][i])
nm_year.append(nm["Year"][i])
nm_data.append(nm["Data"][i])
elif nm["MSN"][i] == "TPOPP":
nm_tpopp = nm["Data"][i]
nm_msn.append(nm["MSN"][i])
nm_year.append(nm["Year"][i])
nm_data.append(nm["Data"][i])
elif nm["MSN"][i] == "TETCB":
nm_tetcb = nm["Data"][i]
nm_msn.append(nm["MSN"][i])
nm_year.append(nm["Year"][i])
nm_data.append(nm["Data"][i])
elif az["MSN"][i] == "TEPRB":
nm_teprb = nm["Data"][i]
nm_msn.append(nm["MSN"][i])
nm_year.append(nm["Year"][i])
nm_data.append(nm["Data"][i])
else:
pass
else:
pass
nm_renewable = OrderedDict()
nm_renewable["MSN"] = nm_msn
nm_renewable["Year"] = nm_year
nm_renewable["Data"] = nm_data
nm_renewable_data = pd.DataFrame(nm_renewable)
nm_renewable_data.to_csv("data/csv/renewable/nm_renewable.csv", index=False, index_label=False, sep=',')
# tx
tx_msn = []
tx_year = []
tx_data = []
for i in range(len(tx["MSN"])):
if tx["Year"][i] == 2009:
if tx["MSN"][i] == "REPRB":
tx_reprb = tx["Data"][i]
tx_msn.append(tx["MSN"][i])
tx_year.append(tx["Year"][i])
tx_data.append(tx["Data"][i])
elif tx["MSN"][i] == "RETCB":
tx_retcb = tx["Data"][i]
tx_msn.append(tx["MSN"][i])
tx_year.append(tx["Year"][i])
tx_data.append(tx["Data"][i])
elif tx["MSN"][i] == "ROPRB":
tx_roprb = tx["Data"][i]
tx_msn.append(tx["MSN"][i])
tx_year.append(tx["Year"][i])
tx_data.append(tx["Data"][i])
elif tx["MSN"][i] == "TPOPP":
tx_tpopp = tx["Data"][i]
tx_msn.append(tx["MSN"][i])
tx_year.append(tx["Year"][i])
tx_data.append(tx["Data"][i])
elif tx["MSN"][i] == "TETCB":
tx_tetcb = tx["Data"][i]
tx_msn.append(tx["MSN"][i])
tx_year.append(tx["Year"][i])
tx_data.append(tx["Data"][i])
elif tx["MSN"][i] == "TEPRB":
tx_teprb = tx["Data"][i]
tx_msn.append(tx["MSN"][i])
tx_year.append(tx["Year"][i])
tx_data.append(tx["Data"][i])
else:
pass
else:
pass
tx_renewable = OrderedDict()
tx_renewable["MSN"] = tx_msn
tx_renewable["Year"] = tx_year
tx_renewable["Data"] = tx_data
tx_renewable_data = pd.DataFrame(tx_renewable)
tx_renewable_data.to_csv("data/csv/renewable/tx_renewable.csv", index=False, index_label=False, sep=',')
| [
"pandas.DataFrame",
"collections.OrderedDict",
"pandas.read_csv",
"re.search"
] | [((261, 306), 'pandas.read_csv', 'pd.read_csv', (['"""data/csv/original/msncodes.csv"""'], {}), "('data/csv/original/msncodes.csv')\n", (272, 306), True, 'import pandas as pd\n'), ((331, 406), 'pandas.read_csv', 'pd.read_csv', (['"""data/csv/state_data/az_data.csv"""'], {'engine': '"""c"""', 'low_memory': '(True)'}), "('data/csv/state_data/az_data.csv', engine='c', low_memory=True)\n", (342, 406), True, 'import pandas as pd\n'), ((412, 487), 'pandas.read_csv', 'pd.read_csv', (['"""data/csv/state_data/ca_data.csv"""'], {'engine': '"""c"""', 'low_memory': '(True)'}), "('data/csv/state_data/ca_data.csv', engine='c', low_memory=True)\n", (423, 487), True, 'import pandas as pd\n'), ((493, 568), 'pandas.read_csv', 'pd.read_csv', (['"""data/csv/state_data/nm_data.csv"""'], {'engine': '"""c"""', 'low_memory': '(True)'}), "('data/csv/state_data/nm_data.csv', engine='c', low_memory=True)\n", (504, 568), True, 'import pandas as pd\n'), ((574, 649), 'pandas.read_csv', 'pd.read_csv', (['"""data/csv/state_data/tx_data.csv"""'], {'engine': '"""c"""', 'low_memory': '(True)'}), "('data/csv/state_data/tx_data.csv', engine='c', low_memory=True)\n", (585, 649), True, 'import pandas as pd\n'), ((974, 987), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (985, 987), False, 'from collections import OrderedDict, defaultdict\n'), ((1092, 1115), 'pandas.DataFrame', 'pd.DataFrame', (['renewable'], {}), '(renewable)\n', (1104, 1115), True, 'import pandas as pd\n'), ((2616, 2629), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2627, 2629), False, 'from collections import OrderedDict, defaultdict\n'), ((2741, 2767), 'pandas.DataFrame', 'pd.DataFrame', (['az_renewable'], {}), '(az_renewable)\n', (2753, 2767), True, 'import pandas as pd\n'), ((4240, 4253), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4251, 4253), False, 'from collections import OrderedDict, defaultdict\n'), ((4365, 4391), 'pandas.DataFrame', 'pd.DataFrame', (['ca_renewable'], {}), '(ca_renewable)\n', (4377, 4391), True, 'import pandas as pd\n'), ((5864, 5877), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5875, 5877), False, 'from collections import OrderedDict, defaultdict\n'), ((5989, 6015), 'pandas.DataFrame', 'pd.DataFrame', (['nm_renewable'], {}), '(nm_renewable)\n', (6001, 6015), True, 'import pandas as pd\n'), ((7488, 7501), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7499, 7501), False, 'from collections import OrderedDict, defaultdict\n'), ((7613, 7639), 'pandas.DataFrame', 'pd.DataFrame', (['tx_renewable'], {}), '(tx_renewable)\n', (7625, 7639), True, 'import pandas as pd\n'), ((770, 824), 're.search', 're.search', (['"""[R|r]enewable"""', "msncodes['Description'][i]"], {}), "('[R|r]enewable', msncodes['Description'][i])\n", (779, 824), False, 'import re\n')] |
"""empty message
Revision ID: <KEY>
Revises: afacc23c2670
Create Date: 2019-11-27 09:22:33.188541
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "afacc23c2670"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_kind_images_id", table_name="kind_images")
op.drop_index("ix_kind_images_kind_id", table_name="kind_images")
op.drop_table("kind_images")
op.add_column("kinds", sa.Column("image_1", sa.String(length=255), nullable=True))
op.add_column("kinds", sa.Column("image_2", sa.String(length=255), nullable=True))
op.add_column("kinds", sa.Column("image_3", sa.String(length=255), nullable=True))
op.add_column("kinds", sa.Column("image_4", sa.String(length=255), nullable=True))
op.add_column("kinds", sa.Column("image_5", sa.String(length=255), nullable=True))
op.add_column("kinds", sa.Column("image_6", sa.String(length=255), nullable=True))
op.create_index(op.f("ix_kinds_image_1"), "kinds", ["image_1"], unique=True)
op.create_index(op.f("ix_kinds_image_2"), "kinds", ["image_2"], unique=True)
op.create_index(op.f("ix_kinds_image_3"), "kinds", ["image_3"], unique=True)
op.create_index(op.f("ix_kinds_image_4"), "kinds", ["image_4"], unique=True)
op.create_index(op.f("ix_kinds_image_5"), "kinds", ["image_5"], unique=True)
op.create_index(op.f("ix_kinds_image_6"), "kinds", ["image_6"], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_kinds_image_6"), table_name="kinds")
op.drop_index(op.f("ix_kinds_image_5"), table_name="kinds")
op.drop_index(op.f("ix_kinds_image_4"), table_name="kinds")
op.drop_index(op.f("ix_kinds_image_3"), table_name="kinds")
op.drop_index(op.f("ix_kinds_image_2"), table_name="kinds")
op.drop_index(op.f("ix_kinds_image_1"), table_name="kinds")
op.drop_column("kinds", "image_6")
op.drop_column("kinds", "image_5")
op.drop_column("kinds", "image_4")
op.drop_column("kinds", "image_3")
op.drop_column("kinds", "image_2")
op.drop_column("kinds", "image_1")
op.create_table(
"kind_images",
sa.Column("id", postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column("created_at", postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.Column("modified_at", postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.Column("kind_id", postgresql.UUID(), autoincrement=False, nullable=True),
sa.Column("name", sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column("order_number", sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(["kind_id"], ["kinds.id"], name="kind_images_kind_id_fkey"),
sa.PrimaryKeyConstraint("id", name="kind_images_pkey"),
)
op.create_index("ix_kind_images_kind_id", "kind_images", ["kind_id"], unique=False)
op.create_index("ix_kind_images_id", "kind_images", ["id"], unique=False)
# ### end Alembic commands ###
| [
"sqlalchemy.ForeignKeyConstraint",
"alembic.op.drop_table",
"alembic.op.f",
"alembic.op.drop_column",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.VARCHAR",
"sqlalchemy.dialects.postgresql.UUID",
"sqlalchemy.String",
"sqlalchemy.INTEGER",
"alembic.op.drop_index",
"sqlalchemy.dialects.postgresq... | [((412, 472), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_kind_images_id"""'], {'table_name': '"""kind_images"""'}), "('ix_kind_images_id', table_name='kind_images')\n", (425, 472), False, 'from alembic import op\n'), ((477, 542), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_kind_images_kind_id"""'], {'table_name': '"""kind_images"""'}), "('ix_kind_images_kind_id', table_name='kind_images')\n", (490, 542), False, 'from alembic import op\n'), ((547, 575), 'alembic.op.drop_table', 'op.drop_table', (['"""kind_images"""'], {}), "('kind_images')\n", (560, 575), False, 'from alembic import op\n'), ((2092, 2126), 'alembic.op.drop_column', 'op.drop_column', (['"""kinds"""', '"""image_6"""'], {}), "('kinds', 'image_6')\n", (2106, 2126), False, 'from alembic import op\n'), ((2131, 2165), 'alembic.op.drop_column', 'op.drop_column', (['"""kinds"""', '"""image_5"""'], {}), "('kinds', 'image_5')\n", (2145, 2165), False, 'from alembic import op\n'), ((2170, 2204), 'alembic.op.drop_column', 'op.drop_column', (['"""kinds"""', '"""image_4"""'], {}), "('kinds', 'image_4')\n", (2184, 2204), False, 'from alembic import op\n'), ((2209, 2243), 'alembic.op.drop_column', 'op.drop_column', (['"""kinds"""', '"""image_3"""'], {}), "('kinds', 'image_3')\n", (2223, 2243), False, 'from alembic import op\n'), ((2248, 2282), 'alembic.op.drop_column', 'op.drop_column', (['"""kinds"""', '"""image_2"""'], {}), "('kinds', 'image_2')\n", (2262, 2282), False, 'from alembic import op\n'), ((2287, 2321), 'alembic.op.drop_column', 'op.drop_column', (['"""kinds"""', '"""image_1"""'], {}), "('kinds', 'image_1')\n", (2301, 2321), False, 'from alembic import op\n'), ((3058, 3145), 'alembic.op.create_index', 'op.create_index', (['"""ix_kind_images_kind_id"""', '"""kind_images"""', "['kind_id']"], {'unique': '(False)'}), "('ix_kind_images_kind_id', 'kind_images', ['kind_id'],\n unique=False)\n", (3073, 3145), False, 'from alembic import op\n'), ((3146, 3219), 'alembic.op.create_index', 'op.create_index', (['"""ix_kind_images_id"""', '"""kind_images"""', "['id']"], {'unique': '(False)'}), "('ix_kind_images_id', 'kind_images', ['id'], unique=False)\n", (3161, 3219), False, 'from alembic import op\n'), ((1118, 1142), 'alembic.op.f', 'op.f', (['"""ix_kinds_image_1"""'], {}), "('ix_kinds_image_1')\n", (1122, 1142), False, 'from alembic import op\n'), ((1199, 1223), 'alembic.op.f', 'op.f', (['"""ix_kinds_image_2"""'], {}), "('ix_kinds_image_2')\n", (1203, 1223), False, 'from alembic import op\n'), ((1280, 1304), 'alembic.op.f', 'op.f', (['"""ix_kinds_image_3"""'], {}), "('ix_kinds_image_3')\n", (1284, 1304), False, 'from alembic import op\n'), ((1361, 1385), 'alembic.op.f', 'op.f', (['"""ix_kinds_image_4"""'], {}), "('ix_kinds_image_4')\n", (1365, 1385), False, 'from alembic import op\n'), ((1442, 1466), 'alembic.op.f', 'op.f', (['"""ix_kinds_image_5"""'], {}), "('ix_kinds_image_5')\n", (1446, 1466), False, 'from alembic import op\n'), ((1523, 1547), 'alembic.op.f', 'op.f', (['"""ix_kinds_image_6"""'], {}), "('ix_kinds_image_6')\n", (1527, 1547), False, 'from alembic import op\n'), ((1722, 1746), 'alembic.op.f', 'op.f', (['"""ix_kinds_image_6"""'], {}), "('ix_kinds_image_6')\n", (1726, 1746), False, 'from alembic import op\n'), ((1786, 1810), 'alembic.op.f', 'op.f', (['"""ix_kinds_image_5"""'], {}), "('ix_kinds_image_5')\n", (1790, 1810), False, 'from alembic import op\n'), ((1850, 1874), 'alembic.op.f', 'op.f', (['"""ix_kinds_image_4"""'], {}), "('ix_kinds_image_4')\n", (1854, 1874), False, 'from alembic import op\n'), ((1914, 1938), 'alembic.op.f', 'op.f', (['"""ix_kinds_image_3"""'], {}), "('ix_kinds_image_3')\n", (1918, 1938), False, 'from alembic import op\n'), ((1978, 2002), 'alembic.op.f', 'op.f', (['"""ix_kinds_image_2"""'], {}), "('ix_kinds_image_2')\n", (1982, 2002), False, 'from alembic import op\n'), ((2042, 2066), 'alembic.op.f', 'op.f', (['"""ix_kinds_image_1"""'], {}), "('ix_kinds_image_1')\n", (2046, 2066), False, 'from alembic import op\n'), ((2899, 2987), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['kind_id']", "['kinds.id']"], {'name': '"""kind_images_kind_id_fkey"""'}), "(['kind_id'], ['kinds.id'], name=\n 'kind_images_kind_id_fkey')\n", (2922, 2987), True, 'import sqlalchemy as sa\n'), ((2992, 3046), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {'name': '"""kind_images_pkey"""'}), "('id', name='kind_images_pkey')\n", (3015, 3046), True, 'import sqlalchemy as sa\n'), ((624, 645), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (633, 645), True, 'import sqlalchemy as sa\n'), ((711, 732), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (720, 732), True, 'import sqlalchemy as sa\n'), ((798, 819), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (807, 819), True, 'import sqlalchemy as sa\n'), ((885, 906), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (894, 906), True, 'import sqlalchemy as sa\n'), ((972, 993), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (981, 993), True, 'import sqlalchemy as sa\n'), ((1059, 1080), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (1068, 1080), True, 'import sqlalchemy as sa\n'), ((2390, 2407), 'sqlalchemy.dialects.postgresql.UUID', 'postgresql.UUID', ([], {}), '()\n', (2405, 2407), False, 'from sqlalchemy.dialects import postgresql\n'), ((2479, 2501), 'sqlalchemy.dialects.postgresql.TIMESTAMP', 'postgresql.TIMESTAMP', ([], {}), '()\n', (2499, 2501), False, 'from sqlalchemy.dialects import postgresql\n'), ((2573, 2595), 'sqlalchemy.dialects.postgresql.TIMESTAMP', 'postgresql.TIMESTAMP', ([], {}), '()\n', (2593, 2595), False, 'from sqlalchemy.dialects import postgresql\n'), ((2663, 2680), 'sqlalchemy.dialects.postgresql.UUID', 'postgresql.UUID', ([], {}), '()\n', (2678, 2680), False, 'from sqlalchemy.dialects import postgresql\n'), ((2745, 2767), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(255)'}), '(length=255)\n', (2755, 2767), True, 'import sqlalchemy as sa\n'), ((2840, 2852), 'sqlalchemy.INTEGER', 'sa.INTEGER', ([], {}), '()\n', (2850, 2852), True, 'import sqlalchemy as sa\n')] |
import unittest
import collections_and_iterators
""" Linked lists - TESTS
Testing Collections programming examples from collections.py
"""
class TestObjectMethods(unittest.TestCase):
def setUp(self):
self.singleLinkList = collections_and_iterators.SinglyLinkedList()
self.singleLinkListData = collections_and_iterators.SinglyLinkedList()
self.singleLinkListData.append("Cosmo")
self.singleLinkListData.append("Allie")
self.singleLinkListData.append("Watson")
self.doubleLinkList = collections_and_iterators.DoublyLinkedList()
self.doubleLinkListData = collections_and_iterators.DoublyLinkedList()
#these totally aren't some of Gabby's courses for this semester
self.doubleLinkListData.append("COM S 228")
self.doubleLinkListData.append("PHIL 343")
self.doubleLinkListData.append("COM S 444")
#test that a newly initialized singly linked list has size 0, null head and null cursor
def test_empty_single_list(self):
self.assertEqual(0, self.singleLinkList.size)
self.assertIsNone(self.singleLinkList.head)
self.assertIsNone(self.singleLinkList.cursor)
#__contains__ should return true if the list contains specified data
def test_contains_success(self):
self.assertTrue("Cosmo" in self.singleLinkListData)
self.assertTrue("Allie" in self.singleLinkListData)
self.assertTrue("Watson" in self.singleLinkListData)
#__contains should return false if the list does not contained specified data, d u h
def test_contains_failure(self):
self.assertFalse("Gabby" in self.singleLinkListData)
self.assertFalse("Thomas" in self.singleLinkListData)
#append should add data to the end of the list
def test_append_success(self):
self.assertEqual("Cosmo", self.singleLinkListData[0])
self.assertEqual("Allie", self.singleLinkListData[1])
self.assertEqual("Watson", self.singleLinkListData[2])
#append should raise an exception when trying to
def test_append_failure(self):
with self.assertRaises(IndexError):
self.singleLinkListData[3]
self.singleLinkListData.append("Foley")
self.assertEqual("Foley", self.singleLinkListData[3])
#__getitem__ should get the data at the specified index unless the specified index is out of bounds. then it should throw an exception
def test_getitem_success(self):
self.assertEqual("Cosmo", self.singleLinkListData.__getitem__(0))
self.assertEqual("Allie", self.singleLinkListData.__getitem__(1))
self.assertEqual("Watson", self.singleLinkListData.__getitem__(2))
def test_getitem_failure(self):
with self.assertRaises(IndexError):
self.singleLinkListData.__getitem__(3)
self.singleLinkListData.__getitem__(-3)
#__setitem__ should change the data at a given index
def test_setitem_success(self):
self.assertEqual("Cosmo", self.singleLinkListData[0])
self.singleLinkListData[0] = "Smalls"
self.assertEqual("Smalls", self.singleLinkListData[0])
#__setitem__ should raise an exception when trying to access an element that does not exist
def test_setitem_failure(self):
with self.assertRaises(IndexError):
self.singleLinkListData[5] = "Bruno"
self.singleLinkListData[-1] = "Lucie"
#test that a newly initialized doubly linked list has size 0, null head and null cursor
def test_empty_double_list(self):
self.assertEqual(0, self.doubleLinkList.size)
self.assertIsNone(self.doubleLinkList.head)
self.assertIsNone(self.doubleLinkList.cursor)
def test_insert_success(self):
#The list should look like:
#COM S 228, PHIL 343, COM S 444
self.assertEqual("COM S 228", self.doubleLinkListData[0])
self.assertEqual("PHIL 343", self.doubleLinkListData[1])
self.assertEqual("COM S 444", self.doubleLinkListData[2])
self.doubleLinkListData.insert("ENGL 314", 0)
#Now it should look like:
#ENGL 314, COM S 228, PHIL 343, COM S 444
self.assertEqual("ENGL 314", self.doubleLinkListData[0])
self.assertEqual("COM S 228", self.doubleLinkListData[1])
self.assertEqual("PHIL 343", self.doubleLinkListData[2])
self.assertEqual("COM S 444", self.doubleLinkListData[3])
self.doubleLinkListData.insert("MATH 207", 2)
#ENGL 314, COM S 228, MATH 207, #PHIL 343, #COM S 444
self.assertEqual("ENGL 314", self.doubleLinkListData[0])
self.assertEqual("COM S 228", self.doubleLinkListData[1])
self.assertEqual("MATH 207", self.doubleLinkListData[2])
self.assertEqual("PHIL 343", self.doubleLinkListData[3])
self.assertEqual("COM S 444", self.doubleLinkListData[4])
def test_insert_fauilure(self):
pass
if __name__ == '__main__':
unittest.main(verbosity=2)
| [
"unittest.main",
"collections_and_iterators.DoublyLinkedList",
"collections_and_iterators.SinglyLinkedList"
] | [((5045, 5071), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (5058, 5071), False, 'import unittest\n'), ((249, 293), 'collections_and_iterators.SinglyLinkedList', 'collections_and_iterators.SinglyLinkedList', ([], {}), '()\n', (291, 293), False, 'import collections_and_iterators\n'), ((331, 375), 'collections_and_iterators.SinglyLinkedList', 'collections_and_iterators.SinglyLinkedList', ([], {}), '()\n', (373, 375), False, 'import collections_and_iterators\n'), ((557, 601), 'collections_and_iterators.DoublyLinkedList', 'collections_and_iterators.DoublyLinkedList', ([], {}), '()\n', (599, 601), False, 'import collections_and_iterators\n'), ((637, 681), 'collections_and_iterators.DoublyLinkedList', 'collections_and_iterators.DoublyLinkedList', ([], {}), '()\n', (679, 681), False, 'import collections_and_iterators\n')] |
import os
import random
import pytest
from fastapi.testclient import TestClient
from jsonschema import Draft7Validator
from pathlib import Path
from main import app
from app.test.schema.schema_generator import test_generate_product_schema
client = TestClient(app)
current_path = Path(__file__).resolve().parent
@pytest.mark.generate_product
def test_generate_product():
img_name = random.choice(os.listdir(str(Path(current_path, "img/"))))
img_path = str(Path(current_path, "img/", img_name))
response = client.post(
"/product/generate",
files={"img": (img_name, open(img_path, "rb"), "image/jpeg")},
)
assert response.status_code == 200
assert Draft7Validator(test_generate_product_schema).is_valid(response.json())
| [
"jsonschema.Draft7Validator",
"fastapi.testclient.TestClient",
"pathlib.Path"
] | [((251, 266), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (261, 266), False, 'from fastapi.testclient import TestClient\n'), ((469, 505), 'pathlib.Path', 'Path', (['current_path', '"""img/"""', 'img_name'], {}), "(current_path, 'img/', img_name)\n", (473, 505), False, 'from pathlib import Path\n'), ((283, 297), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (287, 297), False, 'from pathlib import Path\n'), ((693, 738), 'jsonschema.Draft7Validator', 'Draft7Validator', (['test_generate_product_schema'], {}), '(test_generate_product_schema)\n', (708, 738), False, 'from jsonschema import Draft7Validator\n'), ((420, 446), 'pathlib.Path', 'Path', (['current_path', '"""img/"""'], {}), "(current_path, 'img/')\n", (424, 446), False, 'from pathlib import Path\n')] |
from sanic.response import json
from sanic import Blueprint
from sanic_openapi import doc
health_blueprint = Blueprint('health', url_prefix='/health', strict_slashes=True)
@health_blueprint.route('/', methods=['GET', 'OPTIONS'])
@doc.summary("Get health and stats about the service.")
@doc.produces(json)
async def health(request):
"""
Get health and stats about the service.
:return: sanic.response.json
"""
r = dict(
health='green'
)
return json(r)
| [
"sanic_openapi.doc.summary",
"sanic.response.json",
"sanic.Blueprint",
"sanic_openapi.doc.produces"
] | [((111, 173), 'sanic.Blueprint', 'Blueprint', (['"""health"""'], {'url_prefix': '"""/health"""', 'strict_slashes': '(True)'}), "('health', url_prefix='/health', strict_slashes=True)\n", (120, 173), False, 'from sanic import Blueprint\n'), ((234, 288), 'sanic_openapi.doc.summary', 'doc.summary', (['"""Get health and stats about the service."""'], {}), "('Get health and stats about the service.')\n", (245, 288), False, 'from sanic_openapi import doc\n'), ((290, 308), 'sanic_openapi.doc.produces', 'doc.produces', (['json'], {}), '(json)\n', (302, 308), False, 'from sanic_openapi import doc\n'), ((486, 493), 'sanic.response.json', 'json', (['r'], {}), '(r)\n', (490, 493), False, 'from sanic.response import json\n')] |
"""The actual schema part of `json_schema`."""
from itertools import izip
from yape.json_schema.tokens import token_stream
json_types = (
list, dict, unicode, int, long, bool, type(None)
)
class SchemaError(Exception):
pass
class UnexpectedToken(SchemaError):
def __init__(self, token, expected=None):
self.token = token
message = repr(token)
if expected is not None:
self.expected = expected
message += " (expected %r)" % (expected,)
super(UnexpectedToken, self).__init__(message)
class Schema(object):
def __init__(self, value):
self.tokens = list(token_stream(value))
def validate(self, value):
"""Validate *value* against the schema."""
return self.validate_tokens(token_stream(value))
def validate_tokens(self, tokens):
"""Validate *tokens* against the schema."""
for schema_token, real_token in izip(self.tokens, tokens):
if schema_token != real_token:
return False, schema_token, real_token
return True, None, None
class SchemaCollectionType(type):
def __new__(self, name, bases, attrs):
schema_attnames = (
attr for attr in attrs
if not attr.startswith("_") and isinstance(attrs[attr], json_types)
)
schemas = dict(
(attname, Schema(attrs[attname])) for attname in schema_attnames
)
attrs["schemas"] = schemas
attrs.update(schemas)
return super(SchemaCollectionType, self).__new__(self, name, bases, attrs)
class SchemaCollection(object):
__metaclass__ = SchemaCollectionType
@classmethod
def match_to_schema(cls, value):
tokens = list(token_stream(value))
for schema_name, schema in cls.schemas.iteritems():
valid, expected, got = schema.validate_tokens(tokens)
if valid:
return schema_name
| [
"yape.json_schema.tokens.token_stream",
"itertools.izip"
] | [((935, 960), 'itertools.izip', 'izip', (['self.tokens', 'tokens'], {}), '(self.tokens, tokens)\n', (939, 960), False, 'from itertools import izip\n'), ((642, 661), 'yape.json_schema.tokens.token_stream', 'token_stream', (['value'], {}), '(value)\n', (654, 661), False, 'from yape.json_schema.tokens import token_stream\n'), ((782, 801), 'yape.json_schema.tokens.token_stream', 'token_stream', (['value'], {}), '(value)\n', (794, 801), False, 'from yape.json_schema.tokens import token_stream\n'), ((1737, 1756), 'yape.json_schema.tokens.token_stream', 'token_stream', (['value'], {}), '(value)\n', (1749, 1756), False, 'from yape.json_schema.tokens import token_stream\n')] |
import unittest
from lib_db import DBClient
class TestDBClient(unittest.TestCase):
client: DBClient
def setUp(self) -> None:
self.client = DBClient()
def test_integrity(self):
all_peer_ids = set(self.client.get_all_peer_ids())
online_peer_ids = set(self.client.get_online_peer_ids())
self.assertTrue(online_peer_ids.issubset(all_peer_ids))
offline_peer_ids = set(self.client.get_offline_peer_ids())
self.assertTrue(offline_peer_ids.issubset(all_peer_ids))
all_entering_peer_ids = set(self.client.get_all_entering_peer_ids())
self.assertTrue(all_entering_peer_ids.issubset(all_peer_ids))
self.assertTrue(all_entering_peer_ids.isdisjoint(online_peer_ids))
self.assertTrue(all_entering_peer_ids.isdisjoint(offline_peer_ids))
all_leaving_peer_ids = set(self.client.get_all_leaving_peer_ids())
self.assertTrue(all_leaving_peer_ids.issubset(all_peer_ids))
self.assertTrue(all_leaving_peer_ids.isdisjoint(online_peer_ids))
self.assertTrue(all_leaving_peer_ids.isdisjoint(offline_peer_ids))
# The following needn't be necessarily true but unlikely that it isn't
self.assertTrue(len(all_entering_peer_ids.intersection(all_leaving_peer_ids)) > 0)
only_entering_peer_ids = set(self.client.get_only_entering_peer_ids())
self.assertTrue(only_entering_peer_ids.issubset(all_peer_ids))
self.assertTrue(only_entering_peer_ids.isdisjoint(online_peer_ids))
self.assertTrue(only_entering_peer_ids.isdisjoint(offline_peer_ids))
self.assertTrue(only_entering_peer_ids.isdisjoint(all_leaving_peer_ids))
self.assertTrue(only_entering_peer_ids.issubset(all_entering_peer_ids))
only_leaving_peer_ids = set(self.client.get_only_leaving_peer_ids())
self.assertTrue(only_leaving_peer_ids.issubset(all_peer_ids))
self.assertTrue(only_leaving_peer_ids.isdisjoint(online_peer_ids))
self.assertTrue(only_leaving_peer_ids.isdisjoint(offline_peer_ids))
self.assertTrue(only_leaving_peer_ids.isdisjoint(all_entering_peer_ids))
self.assertTrue(only_leaving_peer_ids.issubset(all_leaving_peer_ids))
ephemeral_peer_ids = set(self.client.get_ephemeral_peer_ids())
self.assertTrue(ephemeral_peer_ids.issubset(all_entering_peer_ids))
self.assertTrue(ephemeral_peer_ids.issubset(all_leaving_peer_ids))
dangling_peer_ids = set(self.client.get_dangling_peer_ids())
self.assertTrue(dangling_peer_ids.issubset(all_peer_ids))
self.assertTrue(dangling_peer_ids.isdisjoint(online_peer_ids))
self.assertTrue(dangling_peer_ids.isdisjoint(offline_peer_ids))
self.assertTrue(dangling_peer_ids.issubset(all_entering_peer_ids))
self.assertTrue(dangling_peer_ids.issubset(all_leaving_peer_ids))
oneoff_peer_ids = set(self.client.get_oneoff_peer_ids())
self.assertTrue(oneoff_peer_ids.issubset(all_peer_ids))
self.assertTrue(oneoff_peer_ids.isdisjoint(online_peer_ids))
self.assertTrue(oneoff_peer_ids.isdisjoint(offline_peer_ids))
self.assertTrue(oneoff_peer_ids.isdisjoint(dangling_peer_ids))
self.assertTrue(oneoff_peer_ids.issubset(all_entering_peer_ids))
self.assertTrue(oneoff_peer_ids.issubset(all_leaving_peer_ids))
calculated_all_peer_ids = oneoff_peer_ids | online_peer_ids | offline_peer_ids | only_entering_peer_ids | only_leaving_peer_ids | dangling_peer_ids
self.assertEqual(len(all_peer_ids), len(calculated_all_peer_ids))
self.assertEqual(all_peer_ids, calculated_all_peer_ids)
def test_get_all_peer_ids_for_all_agent_versions(self):
all_agent_versions = self.client.get_all_agent_versions()
all_peer_ids_by_all_agent_versions = set(self.client.get_peer_ids_for_agent_versions(all_agent_versions))
online_peer_ids = set(self.client.get_online_peer_ids())
all_entering_peer_ids = set(self.client.get_all_entering_peer_ids())
dangling_peer_ids = set(self.client.get_dangling_peer_ids())
self.assertTrue(online_peer_ids.issubset(all_peer_ids_by_all_agent_versions))
self.assertTrue(all_entering_peer_ids.issubset(all_peer_ids_by_all_agent_versions))
self.assertTrue(dangling_peer_ids.issubset(all_peer_ids_by_all_agent_versions))
# Now there can be nodes that started their session before
# the beginning of the time interval, were then "crawlable" (we
# could extract the agent version) and then left.
left_peer_ids = all_peer_ids_by_all_agent_versions - online_peer_ids - all_entering_peer_ids - dangling_peer_ids
only_leaving_peer_ids = set(self.client.get_only_leaving_peer_ids())
self.assertTrue(left_peer_ids.issubset(only_leaving_peer_ids))
# TODO: there is a minor bug in the time calculation of session start/ends. When that's fixed:
# self.assertEqual(left_peer_ids, only_leaving_peer_ids)
def test_agent_version_queries(self):
agent_version_distribution = self.client.get_agent_versions_distribution()
agent_version = agent_version_distribution[0][0]
agent_version_count = agent_version_distribution[0][1]
peer_ids_by_agent_version = self.client.get_peer_ids_for_agent_versions([agent_version])
self.assertEqual(agent_version_count, len(peer_ids_by_agent_version))
agent_versions_for_peer_ids = self.client.get_agent_versions_for_peer_ids(peer_ids_by_agent_version)
self.assertEqual(agent_versions_for_peer_ids[0][1],
agent_version_count) # we only queried for peers with one agent
def test_geo_integrity(self):
import pandas as pd
all_peer_ids = set(self.client.get_all_peer_ids())
no_public_ip_peer_ids = set(self.client.get_no_public_ip_peer_ids())
self.assertTrue(no_public_ip_peer_ids.issubset(all_peer_ids))
countries = self.client.get_countries()
countries_peer_ids = set(pd.DataFrame(countries, columns=["peer_id", "country"])["peer_id"].unique())
self.assertTrue(countries_peer_ids.issubset(all_peer_ids))
self.assertTrue(countries_peer_ids.isdisjoint(no_public_ip_peer_ids))
countries_with_relays = self.client.get_countries_with_relays()
countries_with_relays_peer_ids = set(
pd.DataFrame(countries_with_relays, columns=["peer_id", "country"])["peer_id"].unique())
self.assertTrue(countries_with_relays_peer_ids.issubset(all_peer_ids))
self.assertTrue(countries_with_relays_peer_ids.isdisjoint(no_public_ip_peer_ids))
self.assertTrue(countries_peer_ids.issubset(countries_with_relays_peer_ids))
unresolved_peer_ids = set(self.client.get_unresolved_peer_ids())
self.assertTrue(unresolved_peer_ids.issubset(all_peer_ids))
self.assertTrue(unresolved_peer_ids.isdisjoint(no_public_ip_peer_ids))
self.assertTrue(unresolved_peer_ids.isdisjoint(countries_peer_ids))
self.assertTrue(unresolved_peer_ids.isdisjoint(countries_with_relays_peer_ids))
calculated_all = no_public_ip_peer_ids | countries_peer_ids | countries_with_relays_peer_ids | unresolved_peer_ids
self.assertEqual(all_peer_ids, calculated_all)
def test_flatten(self):
flattened = DBClient._DBClient__flatten([(1,), (2,)])
self.assertListEqual(flattened, [1, 2])
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"lib_db.DBClient._DBClient__flatten",
"lib_db.DBClient",
"pandas.DataFrame"
] | [((7464, 7479), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7477, 7479), False, 'import unittest\n'), ((159, 169), 'lib_db.DBClient', 'DBClient', ([], {}), '()\n', (167, 169), False, 'from lib_db import DBClient\n'), ((7341, 7382), 'lib_db.DBClient._DBClient__flatten', 'DBClient._DBClient__flatten', (['[(1,), (2,)]'], {}), '([(1,), (2,)])\n', (7368, 7382), False, 'from lib_db import DBClient\n'), ((6031, 6086), 'pandas.DataFrame', 'pd.DataFrame', (['countries'], {'columns': "['peer_id', 'country']"}), "(countries, columns=['peer_id', 'country'])\n", (6043, 6086), True, 'import pandas as pd\n'), ((6384, 6451), 'pandas.DataFrame', 'pd.DataFrame', (['countries_with_relays'], {'columns': "['peer_id', 'country']"}), "(countries_with_relays, columns=['peer_id', 'country'])\n", (6396, 6451), True, 'import pandas as pd\n')] |
import numpy as np
from sklearn import svm
class character:
def __init__(self, raw_character):
self.identity = None
def recognize(characters, classifier):
for char in characters:
data = np.reshape(char.image_centered, np.prod(char.image_centered.shape)).reshape(1, -1)
char.identity = classifier.predict(data)
return characters
| [
"numpy.prod"
] | [((253, 287), 'numpy.prod', 'np.prod', (['char.image_centered.shape'], {}), '(char.image_centered.shape)\n', (260, 287), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import logging
def add2(item, mpc):
"""
主要入口
"""
items = listall2(item, mpc)
items = _filter(items, mpc)
return items
def _filter(items, mpc):
occupied = []
playlists = []
for item in items:
if 'playlist' in item:
try:
# result+=mpc.listplaylistinfo(item['playlist'])
occupied += mpc.listplaylist(item['playlist'])
playlists.append(item)
except BaseException:
# tak文件会归类为playlist,但是似乎无法list,所以应该算作file处理?
if item['playlist'].endswith('.tak'):
item['file'] = item['playlist']
else:
open('/dev/shm/123', 'w').write(str(item))
print(item)
a = 1 / 0
occupied = set(occupied)
non_occupied = []
for item in items:
if 'file' in item:
if item['file'] not in occupied:
non_occupied.append(item)
return non_occupied + playlists
def listall1(item, mpc):
if 'directory' in item:
return mpc.listallinfo(item['directory'])
else:
return [item]
def listall2(item, mpc, result=None):
if None == result:
result = []
if 'directory' in item:
try:
uri = item['directory']
if uri == '..':
return []
children = mpc.lsinfo(uri)
except BaseException:
# 可以强制加入utf8编码错误的文件,但是加入后,ncmpy的listview会出错,无法显示。
children = mpc.listall(item['directory'])
children = [item for item in children if 'file' in item]
for child in children:
listall2(child, mpc, result)
else:
logging.debug('add single item:%s', item)
result.append(item)
return result
| [
"logging.debug"
] | [((1755, 1796), 'logging.debug', 'logging.debug', (['"""add single item:%s"""', 'item'], {}), "('add single item:%s', item)\n", (1768, 1796), False, 'import logging\n')] |
import datetime
SESSION_LIFETIME = datetime.timedelta(minutes=5)
| [
"datetime.timedelta"
] | [((35, 64), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (53, 64), False, 'import datetime\n')] |
# -*- coding: utf-8 -*-
from skimage.viewer import utils
from skimage.viewer.utils import dialogs
from skimage.viewer.qt import QtCore, QtGui, has_qt
from numpy.testing.decorators import skipif
@skipif(not has_qt)
def test_event_loop():
utils.init_qtapp()
timer = QtCore.QTimer()
timer.singleShot(10, QtGui.QApplication.quit)
utils.start_qtapp()
@skipif(not has_qt)
def test_format_filename():
fname = dialogs._format_filename(('apple', 2))
assert fname == 'apple'
fname = dialogs._format_filename('')
assert fname is None
@skipif(not has_qt)
def test_open_file_dialog():
utils.init_qtapp()
timer = QtCore.QTimer()
timer.singleShot(100, lambda: QtGui.QApplication.quit())
filename = dialogs.open_file_dialog()
assert filename is None
@skipif(not has_qt)
def test_save_file_dialog():
utils.init_qtapp()
timer = QtCore.QTimer()
timer.singleShot(100, lambda: QtGui.QApplication.quit())
filename = dialogs.save_file_dialog()
assert filename is None
| [
"skimage.viewer.utils.init_qtapp",
"numpy.testing.decorators.skipif",
"skimage.viewer.utils.dialogs._format_filename",
"skimage.viewer.utils.dialogs.open_file_dialog",
"skimage.viewer.qt.QtCore.QTimer",
"skimage.viewer.utils.dialogs.save_file_dialog",
"skimage.viewer.utils.start_qtapp",
"skimage.viewe... | [((197, 215), 'numpy.testing.decorators.skipif', 'skipif', (['(not has_qt)'], {}), '(not has_qt)\n', (203, 215), False, 'from numpy.testing.decorators import skipif\n'), ((367, 385), 'numpy.testing.decorators.skipif', 'skipif', (['(not has_qt)'], {}), '(not has_qt)\n', (373, 385), False, 'from numpy.testing.decorators import skipif\n'), ((562, 580), 'numpy.testing.decorators.skipif', 'skipif', (['(not has_qt)'], {}), '(not has_qt)\n', (568, 580), False, 'from numpy.testing.decorators import skipif\n'), ((795, 813), 'numpy.testing.decorators.skipif', 'skipif', (['(not has_qt)'], {}), '(not has_qt)\n', (801, 813), False, 'from numpy.testing.decorators import skipif\n'), ((243, 261), 'skimage.viewer.utils.init_qtapp', 'utils.init_qtapp', ([], {}), '()\n', (259, 261), False, 'from skimage.viewer import utils\n'), ((274, 289), 'skimage.viewer.qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (287, 289), False, 'from skimage.viewer.qt import QtCore, QtGui, has_qt\n'), ((344, 363), 'skimage.viewer.utils.start_qtapp', 'utils.start_qtapp', ([], {}), '()\n', (361, 363), False, 'from skimage.viewer import utils\n'), ((426, 464), 'skimage.viewer.utils.dialogs._format_filename', 'dialogs._format_filename', (["('apple', 2)"], {}), "(('apple', 2))\n", (450, 464), False, 'from skimage.viewer.utils import dialogs\n'), ((505, 533), 'skimage.viewer.utils.dialogs._format_filename', 'dialogs._format_filename', (['""""""'], {}), "('')\n", (529, 533), False, 'from skimage.viewer.utils import dialogs\n'), ((614, 632), 'skimage.viewer.utils.init_qtapp', 'utils.init_qtapp', ([], {}), '()\n', (630, 632), False, 'from skimage.viewer import utils\n'), ((645, 660), 'skimage.viewer.qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (658, 660), False, 'from skimage.viewer.qt import QtCore, QtGui, has_qt\n'), ((737, 763), 'skimage.viewer.utils.dialogs.open_file_dialog', 'dialogs.open_file_dialog', ([], {}), '()\n', (761, 763), False, 'from skimage.viewer.utils import dialogs\n'), ((847, 865), 'skimage.viewer.utils.init_qtapp', 'utils.init_qtapp', ([], {}), '()\n', (863, 865), False, 'from skimage.viewer import utils\n'), ((878, 893), 'skimage.viewer.qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (891, 893), False, 'from skimage.viewer.qt import QtCore, QtGui, has_qt\n'), ((970, 996), 'skimage.viewer.utils.dialogs.save_file_dialog', 'dialogs.save_file_dialog', ([], {}), '()\n', (994, 996), False, 'from skimage.viewer.utils import dialogs\n'), ((695, 720), 'skimage.viewer.qt.QtGui.QApplication.quit', 'QtGui.QApplication.quit', ([], {}), '()\n', (718, 720), False, 'from skimage.viewer.qt import QtCore, QtGui, has_qt\n'), ((928, 953), 'skimage.viewer.qt.QtGui.QApplication.quit', 'QtGui.QApplication.quit', ([], {}), '()\n', (951, 953), False, 'from skimage.viewer.qt import QtCore, QtGui, has_qt\n')] |
import sys
def utr_selection(transcripts, log):
"""UTR selection function"""
tmp = []
for t in transcripts:
t.utr5_exons = t.utr5_regions()
t.utr3_exons = t.utr3_regions()
t.utr5_start = t.start if t.strand == '+' else t.end - 1
t.utr3_end = t.end - 1 if t.strand == '+' else t.start
t.utr5_exonic_content_length = sum([e[1] - e[0] for e in t.utr5_exons])
t.utr3_exonic_content_length = sum([e[1] - e[0] for e in t.utr3_exons])
t.utr_ends = [t.utr5_start, t.utr3_end]
tmp.append(t)
transcripts = tmp
log.write('\nUTR criteria applied to the following transcripts:\n')
for t in transcripts:
log.write('- '+t.id+'\n')
log.write(' ' + str(t.strand) +'; UTR5 exons: ' + str(len(t.utr5_exons)) +'; UTR5 start: ' + str(t.utr5_start) + '; UTR5 length: ' + str(t.utr5_exonic_content_length) + '; UTR3 end: ' + str(t.utr3_end) + '; UTR3 length: ' + str(t.utr3_exonic_content_length) + '\n')
log.write('\nUTR criteria selection steps:\n')
dtypes = []
dcrits = []
candidates = transcripts
while len(candidates) != 1:
candidates, dtype, dcrit = select_by_type(candidates, log)
if candidates is None:
return None, None, None
dtypes.append(dtype)
dcrits.append(dcrit)
log.write(' Filtered to '+str([t.id for t in candidates])+'\n')
log.write('\n')
return candidates[0], ','.join(dtypes), ','.join(dcrits)
def select_by_type(transcripts, log):
"""Filter transcripts depending on different type"""
# Difference types: UTR5_number and UTR5_boundary
candidates, dtype, dcrit = analyse_difference_type_utr5_number_or_boundary(transcripts, log)
if candidates is not None:
return candidates, dtype, dcrit
# Difference type: UTR_ends
candidates, dtype, dcrit = analyse_difference_type_UTR_ends(transcripts, log)
if candidates is not None:
return candidates, dtype, dcrit
# Difference type: UTR3
return analyse_difference_type_UTR3(transcripts, log)
def analyse_difference_type_utr5_number_or_boundary(transcripts, log):
"""Analyse difference types UTR5 number or boundary"""
# Check if there is difference in the number of UTR5 exons between any transcripts
diff_utr5_number = False
for i in range(1, len(transcripts)):
if len(transcripts[0].utr5_exons) != len(transcripts[i].utr5_exons):
diff_utr5_number = True
break
# Check if there is difference in the UTR5 exon boundaries between any transcripts
diff_utr5_boundary = False
if not diff_utr5_number:
for i in range(1, len(transcripts)):
if different_utr5_boundary(transcripts[0], transcripts[i]):
diff_utr5_boundary = True
break
# Filtering
if diff_utr5_number or diff_utr5_boundary:
# Difference type
difftype = 'UTR5_number' if diff_utr5_number else 'UTR5_boundary'
log.write(' * Difference type: ' + difftype + '\n')
# Choose the transcript with the largest 5' footprint
candidates = select_largest_5prime_footprint(transcripts)
if len(candidates) < len(transcripts):
log.write(' Applied criteria: UTR5 start\n')
return candidates, difftype, 'UTR5_start'
# Choose the transcript with the longest 5' UTR exonic content
candidates = select_longest_utr5_exonic_content(transcripts)
if len(candidates) < len(transcripts):
log.write(' Applied criteria: UTR5 length\n')
return candidates, difftype, 'UTR5_length'
# Choose the transcript with the most 5' UTR boundary
candidates = select_utr5_first_boundary(transcripts)
if len(candidates) < len(transcripts):
log.write(' Applied criteria: UTR5 boundary\n')
return candidates, difftype, 'UTR5_boundary'
sys.exit('Error 1')
return None, None, None
def analyse_difference_type_UTR_ends(transcripts, log):
"""Analyse difference type UTR ends"""
# Check if there is difference in the UTR ends between any transcripts
diff_utr_ends = False
ue = transcripts[0].utr_ends
for i in range(1, len(transcripts)):
if not transcripts[i].utr_ends == ue:
diff_utr_ends = True
break
# Filtering
if diff_utr_ends:
log.write(' * Difference type: UTR_ends\n')
# Choose the transcript with the largest 5' footprint
candidates = select_largest_5prime_footprint(transcripts)
if len(candidates) < len(transcripts):
log.write(' Applied criteria: UTR5 start\n')
return candidates, 'UTR_ends', 'UTR5_start'
# Choose the transcript with the largest 3' footprint
candidates = select_largest_3prime_footprint(transcripts)
if len(candidates) < len(transcripts):
log.write(' Applied criteria: UTR3 end\n')
return candidates, 'UTR_ends', 'UTR3_end'
# Choose the transcript with the longest 3' UTR exonic content
candidates = select_longest_utr3_exonic_content(transcripts)
if len(candidates) < len(transcripts):
log.write(' Applied criteria: UTR3 length\n')
return candidates, 'UTR_ends', 'UTR3_length'
# Choose the transcript with the most 3' UTR boundary
candidates = select_utr3_first_boundary(transcripts)
if len(candidates) < len(transcripts):
log.write(' Applied criteria: UTR3 boundary\n')
return candidates, 'UTR_ends', 'UTR3_boundary'
sys.exit('Error 2')
return None, None, None
def analyse_difference_type_UTR3(transcripts, log):
"""Analyse difference tyoe UTR3"""
# Check if there is difference in the UTR3 between any transcripts
diff_utr3 = False
for i in range(1, len(transcripts)):
if different_utr3(transcripts[0], transcripts[i]):
diff_utr3 = True
break
# Filtering
if diff_utr3:
log.write(' * Difference type: UTR3\n')
# Choose the transcript with the largest 3' footprint
candidates = select_largest_3prime_footprint(transcripts)
if len(candidates) < len(transcripts):
log.write(' Applied criteria: UTR3 end\n')
return candidates, 'UTR3', 'UTR3_end'
# Choose the transcript with the longest 3' UTR exonic content
candidates = select_longest_utr3_exonic_content(transcripts)
if len(candidates) < len(transcripts):
log.write(' Applied criteria: UTR3 length\n')
return candidates, 'UTR3', 'UTR3_length'
# Choose the transcript with the most 3' UTR boundary
candidates = select_utr3_first_boundary(transcripts)
if len(candidates) < len(transcripts):
log.write(' Applied criteria: UTR3 boundary\n')
return candidates, 'UTR3', 'UTR3_boundary'
sys.exit('Error 3')
return None, None, None
def different_utr5_boundary(transcript1, transcript2):
"""Check if two transcripts have different UTR5 exon boundaries"""
for i in range(len(transcript1.utr5_exons)):
exon1 = transcript1.utr5_exons[i]
exon2 = transcript2.utr5_exons[i]
if exon1[0] != exon2[0] or exon1[1] != exon2[1]:
return True
return False
def different_utr3(transcript1, transcript2):
"""Check if two transcripts have different UTR3 exon boundaries"""
if len(transcript1.utr3_exons) != len(transcript2.utr3_exons):
return True
for i in range(len(transcript1.utr3_exons)):
exon1 = transcript1.utr3_exons[i]
exon2 = transcript2.utr3_exons[i]
if exon1[0] != exon2[0] or exon1[1] != exon2[1]:
return True
return False
def select_largest_5prime_footprint(transcripts):
"""Select transcript(s) with largest 5' footprint"""
ret = [transcripts[0]]
furthest = transcripts[0].utr5_start
for i in range(1, len(transcripts)):
t = transcripts[i]
utr5footprint = t.utr5_start
if utr5footprint == furthest:
ret.append(t)
elif (t.strand == '+' and utr5footprint < furthest) or (t.strand == '-' and utr5footprint > furthest):
furthest = utr5footprint
ret = [t]
return ret
def select_largest_3prime_footprint(transcripts):
"""Select transcript(s) with largest 3' footprint"""
ret = [transcripts[0]]
furthest = transcripts[0].utr3_end
for i in range(1, len(transcripts)):
t = transcripts[i]
utr3footprint = t.utr3_end
if utr3footprint == furthest:
ret.append(t)
elif (t.strand == '+' and utr3footprint > furthest) or (t.strand == '-' and utr3footprint < furthest):
furthest = utr3footprint
ret = [t]
return ret
def select_longest_utr5_exonic_content(transcripts):
"""Select transcript(s) with longest UTR5 exonic content"""
ret = [transcripts[0]]
longest = transcripts[0].utr5_exonic_content_length
for i in range(1, len(transcripts)):
t = transcripts[i]
exonic_content_length = t.utr5_exonic_content_length
if exonic_content_length == longest:
ret.append(t)
elif exonic_content_length > longest:
longest = exonic_content_length
ret = [t]
return ret
def select_longest_utr3_exonic_content(transcripts):
"""Select transcript(s) with longest UTR3 exonic content"""
ret = [transcripts[0]]
longest = transcripts[0].utr3_exonic_content_length
for i in range(1, len(transcripts)):
t = transcripts[i]
exonic_content_length = t.utr3_exonic_content_length
if exonic_content_length == longest:
ret.append(t)
elif exonic_content_length > longest:
longest = exonic_content_length
ret = [t]
return ret
def select_utr5_first_boundary(transcripts):
"""Select transcript(s) with most 5' UTR boundary"""
ret = [transcripts[0]]
if transcripts[0].strand == '+':
most5prime_exons = transcripts[0].utr5_exons
else:
most5prime_exons = [[x[1],x[0]] for x in transcripts[0].utr5_exons]
for i in range(1, len(transcripts)):
t = transcripts[i]
if t.strand == '+':
exons = t.utr5_exons
else:
exons = [[x[1],x[0]] for x in t.utr5_exons]
if most5prime_exons == exons:
ret.append(t)
elif (t.strand == '+' and exons < most5prime_exons) or (t.strand == '-' and exons > most5prime_exons):
ret = [t]
most5prime_exons = exons
return ret
def select_utr3_first_boundary(transcripts):
"""Select transcript(s) with most 3' UTR boundary"""
ret = [transcripts[0]]
if transcripts[0].strand == '+':
most3prime_exons = [[x[1],x[0]] for x in transcripts[0].utr3_exons[::-1]]
else:
most3prime_exons = transcripts[0].utr3_exons[::-1]
for i in range(1, len(transcripts)):
t = transcripts[i]
if t.strand == '+':
exons = [[x[1],x[0]] for x in t.utr3_exons[::-1]]
else:
exons = t.utr3_exons[::-1]
if most3prime_exons == exons:
ret.append(t)
elif (t.strand == '+' and exons > most3prime_exons) or (t.strand == '-' and exons < most3prime_exons):
ret = [t]
most3prime_exons = exons
return ret
| [
"sys.exit"
] | [((3944, 3963), 'sys.exit', 'sys.exit', (['"""Error 1"""'], {}), "('Error 1')\n", (3952, 3963), False, 'import sys\n'), ((5643, 5662), 'sys.exit', 'sys.exit', (['"""Error 2"""'], {}), "('Error 2')\n", (5651, 5662), False, 'import sys\n'), ((6991, 7010), 'sys.exit', 'sys.exit', (['"""Error 3"""'], {}), "('Error 3')\n", (6999, 7010), False, 'import sys\n')] |
import multiprocessing
import os
import subprocess
import sys
import threading
import time
import sdat.tmp as sdat
try:
from Queue import Queue #for python2
except:
from queue import Queue #for python3
def cell():
print("")
print("##############################################################################################")
print("")
print(" SDAT: SPLiT-seq Data Analysis Toolkit")
print("")
print(" A python program(suitable for python2 and python3)")
print("")
print(" Please contact <EMAIL> when questions arise.")
print("")
print("##############################################################################################")
def rmdup_cell(chr, directory):
pname = multiprocessing.current_process().name
print('[%s]' % time.strftime("%Y-%m-%d %X",
time.localtime()) + ' %s is running to deduplicate' % pname + '\033[1;35m %s \033[0m' % chr)
sdat.sam_rmdup('%s/tmp/sam/%s.sam' % (directory, chr), '%s/tmp/rmdup/%s.sam' % (directory, chr),
'%s/tmp/dup/%s.sam' % (directory, chr), '%s/tmp/log1/%s.log' % (directory, chr))
subprocess.Popen('rm %s/tmp/sam/%s.sam' % (directory, chr), shell=True)
print('[%s]' % time.strftime("%Y-%m-%d %X",
time.localtime()) + ' %s is running to split' % pname + '\033[1;35m %s \033[0m' % chr + 'to cell')
sdat.split_sam_to_cell('%s/tmp/rmdup/%s.sam' % (directory, chr), '%s/tmp/log2/%s.log' % (directory, chr),
'%s/tmp/cell/%s' % (directory, chr))
print('[%s]' % time.strftime("%Y-%m-%d %X",
time.localtime()) + ' %s finish spliting' % pname + '\033[1;35m %s \033[0m' % chr + 'to cell successfully')
def merge_cell_chr(directory, chr_list, n):
while True:
cell = task.get()
sdat.merge_cell('%s/tmp/cell' % directory, '%s/cell' % directory, chr_list, cell)
lock.acquire()
count[0] += 1
sys.stdout.write('[' + '#' * int((count[0] + 1) / (n / float(100))) + ' ' * (
100 - int((count[0] + 1) / (n / float(100)))) + ']' + '--[' + '\033[0;32;40m %s\033[0m' % count[0] + '/' + '\033[0;33;40m%s \033[0m' % n + '] Merging' + '\033[0;35;40m %s \033[0m' % cell + '\r')
sys.stdout.flush()
lock.release()
task.task_done()
def help_doc():
print("")
print(" Attention:")
print("")
print(" 'sdat cell':remove PCR duplication and split aligned reads to cell.")
print("")
print(" Usage:")
print("")
print(" sdat cell -b xxxx.bam or sdat cell -s xxxx.sam")
print("")
sys.exit(0)
if len(sys.argv) < 2:
help_doc()
i = 1
while i < len(sys.argv):
if sys.argv[i] == '-b':
try:
bam = sys.argv[i + 1]
if bam.endswith('.bam'):
if '/' in bam:
directory = bam.rstrip('/' + bam.split('/')[-1]) # directory
else:
directory = '.'
pre = bam.split('/')[-1].split('.bam')[0]
else:
print('bam file error!')
help_doc()
exit()
except:
print('bam file error!')
help_doc()
exit()
elif sys.argv[i] == '-s':
try:
sam = sys.argv[i + 1]
if sam.endswith('.sam'):
if '/' in sam:
directory = sam.rstrip('/' + sam.split('/')[-1]) # directory
else:
directory = '.'
pre = sam.split('/')[-1].split('.sam')[0]
else:
print('sam file error!')
help_doc()
exit()
except:
print('sam file error!')
help_doc()
exit()
i += 1
if '-b' not in sys.argv and '-s' not in sys.argv:
print('input file error!')
help_doc()
exit()
if '-b' in sys.argv:
if os.path.exists(bam):
pass
else:
print('bam file no found!')
help_doc()
exit()
elif '-s' in sys.argv:
if os.path.exists(sam):
pass
else:
print('sam file no found!')
help_doc()
exit()
print('[%s]' % time.strftime("%Y-%m-%d %X", time.localtime()) + '\033[1;32m sdat cell start running... \033[0m')
print('[%s]' % time.strftime("%Y-%m-%d %X", time.localtime()) + '\033[1;35m Start deduplicating... \033[0m')
subprocess.Popen('mkdir -p %s/tmp/sam' % directory, shell=True).wait()
subprocess.Popen('mkdir -p %s/tmp/rmdup' % directory, shell=True).wait()
subprocess.Popen('mkdir -p %s/tmp/dup' % directory, shell=True).wait()
subprocess.Popen('mkdir -p %s/tmp/cell' % directory, shell=True).wait()
subprocess.Popen('mkdir -p %s/tmp/log1' % directory, shell=True).wait() # rmdup log
subprocess.Popen('mkdir -p %s/tmp/log2' % directory, shell=True).wait() # reads number per cell log
print('[%s]' % time.strftime("%Y-%m-%d %X", time.localtime()) + '\033[1;35m Spliting sam files... \033[0m')
if '-b' in sys.argv:
chr_list = sdat.split_bam('%s/%s.bam' % (directory, pre),
'%s/tmp/sam' % directory) # split sam into several sam files in tmp/sam (by chromosome)
elif '-s' in sys.argv:
chr_list = sdat.split_sam('%s/%s.sam' % (directory, pre),
'%s/tmp/sam' % directory) # split sam into several sam files in tmp/sam (by chromosome)
chr_num = len(chr_list) # file number in tmp/sam
for chr in chr_list:
subprocess.Popen('mkdir %s/tmp/cell/%s' % (directory, chr), shell=True).wait()
process_list = []
for chr in chr_list:
p = multiprocessing.Process(target=rmdup_cell, args=(chr, directory))
p.daemon = True
p.start()
process_list.append(p)
for p in process_list:
p.join()
print('[%s]' % time.strftime("%Y-%m-%d %X", time.localtime()) + '\033[1;35m Start merging files... \033[0m')
print('[%s]' % time.strftime("%Y-%m-%d %X", time.localtime()) + '\033[1;35m Merging rmdup sam files... \033[0m')
sdat.merge_sam('%s/tmp/rmdup' % directory, '%s/%s_rmdup.sam' % (directory, pre), chr_list) # merge rmdup
print('[%s]' % time.strftime("%Y-%m-%d %X", time.localtime()) + '\033[1;35m Merging dup sam files ... \033[0m')
sdat.merge_sam('%s/tmp/dup' % directory, '%s/%s_dup.sam' % (directory, pre), chr_list) # merge dup
print('[%s]' % time.strftime("%Y-%m-%d %X", time.localtime()) + '\033[1;35m Merging rmdup log files... \033[0m')
sdat.merge_rmdup_log('%s/tmp/log1' % directory, '%s/%s_rmdup.log' % (directory, pre), chr_list)
dict_cell = {}
for chr in chr_list:
cells_tmp = [cell_file.split('.')[0] for cell_file in os.listdir('%s/tmp/cell/%s' % (directory, chr))]
for cell in cells_tmp:
dict_cell[cell] = 0
print('[%s]' % time.strftime("%Y-%m-%d %X",
time.localtime()) + '\033[1;35m Merging log of reads number pre cell... \033[0m')
sdat.merge_cell_log('%s/tmp/log2' % directory, '%s/Reads_number_per_cell.log' % directory, chr_list, dict_cell)
print('[%s]' % time.strftime("%Y-%m-%d %X", time.localtime()) + '\033[1;35m Merging reads per cell... \033[0m')
subprocess.Popen('mkdir %s/cell' % directory, shell=True).wait()
cells = [cell for cell in dict_cell.keys()]
n = len(cells)
count = [0] # for counting in merge_cell_chr function;nonlocal isn't available for python2
lock = threading.Lock() # a threading lock to avoid Thread conflict.
task = Queue()
for i in range(50):
t = threading.Thread(target=merge_cell_chr, args=(directory, chr_list, n))
t.daemon = True
t.start()
for cell in cells:
task.put(cell)
task.join()
subprocess.Popen('rm -r %s/tmp &' % directory, shell=True)
print('\n' + '[%s]' % time.strftime("%Y-%m-%d %X", time.localtime()) + '\033[1;32m Finished successfully\033[0m')
| [
"sdat.tmp.merge_sam",
"multiprocessing.Process",
"sdat.tmp.merge_rmdup_log",
"sdat.tmp.merge_cell",
"sdat.tmp.merge_cell_log",
"sdat.tmp.split_sam_to_cell",
"sys.exit",
"os.path.exists",
"os.listdir",
"sdat.tmp.sam_rmdup",
"threading.Lock",
"subprocess.Popen",
"sdat.tmp.split_sam",
"sys.st... | [((6536, 6630), 'sdat.tmp.merge_sam', 'sdat.merge_sam', (["('%s/tmp/rmdup' % directory)", "('%s/%s_rmdup.sam' % (directory, pre))", 'chr_list'], {}), "('%s/tmp/rmdup' % directory, '%s/%s_rmdup.sam' % (directory,\n pre), chr_list)\n", (6550, 6630), True, 'import sdat.tmp as sdat\n'), ((6762, 6852), 'sdat.tmp.merge_sam', 'sdat.merge_sam', (["('%s/tmp/dup' % directory)", "('%s/%s_dup.sam' % (directory, pre))", 'chr_list'], {}), "('%s/tmp/dup' % directory, '%s/%s_dup.sam' % (directory, pre),\n chr_list)\n", (6776, 6852), True, 'import sdat.tmp as sdat\n'), ((6983, 7083), 'sdat.tmp.merge_rmdup_log', 'sdat.merge_rmdup_log', (["('%s/tmp/log1' % directory)", "('%s/%s_rmdup.log' % (directory, pre))", 'chr_list'], {}), "('%s/tmp/log1' % directory, '%s/%s_rmdup.log' % (\n directory, pre), chr_list)\n", (7003, 7083), True, 'import sdat.tmp as sdat\n'), ((7464, 7580), 'sdat.tmp.merge_cell_log', 'sdat.merge_cell_log', (["('%s/tmp/log2' % directory)", "('%s/Reads_number_per_cell.log' % directory)", 'chr_list', 'dict_cell'], {}), "('%s/tmp/log2' % directory, \n '%s/Reads_number_per_cell.log' % directory, chr_list, dict_cell)\n", (7483, 7580), True, 'import sdat.tmp as sdat\n'), ((7935, 7951), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (7949, 7951), False, 'import threading\n'), ((8009, 8016), 'queue.Queue', 'Queue', ([], {}), '()\n', (8014, 8016), False, 'from queue import Queue\n'), ((8233, 8291), 'subprocess.Popen', 'subprocess.Popen', (["('rm -r %s/tmp &' % directory)"], {'shell': '(True)'}), "('rm -r %s/tmp &' % directory, shell=True)\n", (8249, 8291), False, 'import subprocess\n'), ((975, 1162), 'sdat.tmp.sam_rmdup', 'sdat.sam_rmdup', (["('%s/tmp/sam/%s.sam' % (directory, chr))", "('%s/tmp/rmdup/%s.sam' % (directory, chr))", "('%s/tmp/dup/%s.sam' % (directory, chr))", "('%s/tmp/log1/%s.log' % (directory, chr))"], {}), "('%s/tmp/sam/%s.sam' % (directory, chr), \n '%s/tmp/rmdup/%s.sam' % (directory, chr), '%s/tmp/dup/%s.sam' % (\n directory, chr), '%s/tmp/log1/%s.log' % (directory, chr))\n", (989, 1162), True, 'import sdat.tmp as sdat\n'), ((1184, 1255), 'subprocess.Popen', 'subprocess.Popen', (["('rm %s/tmp/sam/%s.sam' % (directory, chr))"], {'shell': '(True)'}), "('rm %s/tmp/sam/%s.sam' % (directory, chr), shell=True)\n", (1200, 1255), False, 'import subprocess\n'), ((1452, 1603), 'sdat.tmp.split_sam_to_cell', 'sdat.split_sam_to_cell', (["('%s/tmp/rmdup/%s.sam' % (directory, chr))", "('%s/tmp/log2/%s.log' % (directory, chr))", "('%s/tmp/cell/%s' % (directory, chr))"], {}), "('%s/tmp/rmdup/%s.sam' % (directory, chr), \n '%s/tmp/log2/%s.log' % (directory, chr), '%s/tmp/cell/%s' % (directory,\n chr))\n", (1474, 1603), True, 'import sdat.tmp as sdat\n'), ((2816, 2827), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2824, 2827), False, 'import sys\n'), ((4316, 4335), 'os.path.exists', 'os.path.exists', (['bam'], {}), '(bam)\n', (4330, 4335), False, 'import os\n'), ((5507, 5579), 'sdat.tmp.split_bam', 'sdat.split_bam', (["('%s/%s.bam' % (directory, pre))", "('%s/tmp/sam' % directory)"], {}), "('%s/%s.bam' % (directory, pre), '%s/tmp/sam' % directory)\n", (5521, 5579), True, 'import sdat.tmp as sdat\n'), ((6118, 6183), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'rmdup_cell', 'args': '(chr, directory)'}), '(target=rmdup_cell, args=(chr, directory))\n', (6141, 6183), False, 'import multiprocessing\n'), ((8053, 8123), 'threading.Thread', 'threading.Thread', ([], {'target': 'merge_cell_chr', 'args': '(directory, chr_list, n)'}), '(target=merge_cell_chr, args=(directory, chr_list, n))\n', (8069, 8123), False, 'import threading\n'), ((746, 779), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (777, 779), False, 'import multiprocessing\n'), ((1934, 2019), 'sdat.tmp.merge_cell', 'sdat.merge_cell', (["('%s/tmp/cell' % directory)", "('%s/cell' % directory)", 'chr_list', 'cell'], {}), "('%s/tmp/cell' % directory, '%s/cell' % directory, chr_list,\n cell)\n", (1949, 2019), True, 'import sdat.tmp as sdat\n'), ((2390, 2408), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2406, 2408), False, 'import sys\n'), ((4488, 4507), 'os.path.exists', 'os.path.exists', (['sam'], {}), '(sam)\n', (4502, 4507), False, 'import os\n'), ((4857, 4920), 'subprocess.Popen', 'subprocess.Popen', (["('mkdir -p %s/tmp/sam' % directory)"], {'shell': '(True)'}), "('mkdir -p %s/tmp/sam' % directory, shell=True)\n", (4873, 4920), False, 'import subprocess\n'), ((4932, 4997), 'subprocess.Popen', 'subprocess.Popen', (["('mkdir -p %s/tmp/rmdup' % directory)"], {'shell': '(True)'}), "('mkdir -p %s/tmp/rmdup' % directory, shell=True)\n", (4948, 4997), False, 'import subprocess\n'), ((5009, 5072), 'subprocess.Popen', 'subprocess.Popen', (["('mkdir -p %s/tmp/dup' % directory)"], {'shell': '(True)'}), "('mkdir -p %s/tmp/dup' % directory, shell=True)\n", (5025, 5072), False, 'import subprocess\n'), ((5084, 5148), 'subprocess.Popen', 'subprocess.Popen', (["('mkdir -p %s/tmp/cell' % directory)"], {'shell': '(True)'}), "('mkdir -p %s/tmp/cell' % directory, shell=True)\n", (5100, 5148), False, 'import subprocess\n'), ((5160, 5224), 'subprocess.Popen', 'subprocess.Popen', (["('mkdir -p %s/tmp/log1' % directory)"], {'shell': '(True)'}), "('mkdir -p %s/tmp/log1' % directory, shell=True)\n", (5176, 5224), False, 'import subprocess\n'), ((5249, 5313), 'subprocess.Popen', 'subprocess.Popen', (["('mkdir -p %s/tmp/log2' % directory)"], {'shell': '(True)'}), "('mkdir -p %s/tmp/log2' % directory, shell=True)\n", (5265, 5313), False, 'import subprocess\n'), ((5723, 5795), 'sdat.tmp.split_sam', 'sdat.split_sam', (["('%s/%s.sam' % (directory, pre))", "('%s/tmp/sam' % directory)"], {}), "('%s/%s.sam' % (directory, pre), '%s/tmp/sam' % directory)\n", (5737, 5795), True, 'import sdat.tmp as sdat\n'), ((7696, 7753), 'subprocess.Popen', 'subprocess.Popen', (["('mkdir %s/cell' % directory)"], {'shell': '(True)'}), "('mkdir %s/cell' % directory, shell=True)\n", (7712, 7753), False, 'import subprocess\n'), ((5980, 6051), 'subprocess.Popen', 'subprocess.Popen', (["('mkdir %s/tmp/cell/%s' % (directory, chr))"], {'shell': '(True)'}), "('mkdir %s/tmp/cell/%s' % (directory, chr), shell=True)\n", (5996, 6051), False, 'import subprocess\n'), ((7185, 7232), 'os.listdir', 'os.listdir', (["('%s/tmp/cell/%s' % (directory, chr))"], {}), "('%s/tmp/cell/%s' % (directory, chr))\n", (7195, 7232), False, 'import os\n'), ((4671, 4687), 'time.localtime', 'time.localtime', ([], {}), '()\n', (4685, 4687), False, 'import time\n'), ((4788, 4804), 'time.localtime', 'time.localtime', ([], {}), '()\n', (4802, 4804), False, 'import time\n'), ((5399, 5415), 'time.localtime', 'time.localtime', ([], {}), '()\n', (5413, 5415), False, 'import time\n'), ((6350, 6366), 'time.localtime', 'time.localtime', ([], {}), '()\n', (6364, 6366), False, 'import time\n'), ((6463, 6479), 'time.localtime', 'time.localtime', ([], {}), '()\n', (6477, 6479), False, 'import time\n'), ((6690, 6706), 'time.localtime', 'time.localtime', ([], {}), '()\n', (6704, 6706), False, 'import time\n'), ((6910, 6926), 'time.localtime', 'time.localtime', ([], {}), '()\n', (6924, 6926), False, 'import time\n'), ((7378, 7394), 'time.localtime', 'time.localtime', ([], {}), '()\n', (7392, 7394), False, 'import time\n'), ((7624, 7640), 'time.localtime', 'time.localtime', ([], {}), '()\n', (7638, 7640), False, 'import time\n'), ((8347, 8363), 'time.localtime', 'time.localtime', ([], {}), '()\n', (8361, 8363), False, 'import time\n'), ((874, 890), 'time.localtime', 'time.localtime', ([], {}), '()\n', (888, 890), False, 'import time\n'), ((1345, 1361), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1359, 1361), False, 'import time\n'), ((1715, 1731), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1729, 1731), False, 'import time\n')] |
import json
from flask import Flask, request
import requests
# Token that has to be generated from webhook page portal
ACCESS_TOKEN = "random <PASSWORD>"
# Token that has to be added for verification with developer portal
VERIFICATION_TOKEN = "abc"
# Identifier payloads for initial button
C19INDIA = "C19INDIA"
app = Flask(__name__)
# This get endpoint is for verification with messenger app
@app.route('/webhook', methods=['GET'])
def webhook():
verify_token = request.args.get("hub.verify_token")
if verify_token == VERIFICATION_TOKEN:
return request.args.get("hub.challenge")
return 'Unable to authorise.'
@app.route("/webhook", methods=['POST'])
def webhook_handle():
data = request.get_json()
if data["object"] == "page": # To verify that the request is being originated from a page
for entry in data["entry"]:
for event in entry["messaging"]:
if event.get("message"): # somebody typed a message
process_message(event)
# user clicked/tapped "postback" button in earlier message
elif event.get("postback"):
process_postback(event)
return 'ok'
def process_message(event):
# the facebook ID of the person sending you the message
sender_id = event["sender"]["id"]
# could receive text or attachment but not both
if "text" in event["message"]:
send_initial_menu(sender_id)
def send_initial_menu(sender_id):
message_data = json.dumps({
"recipient": {
"id": sender_id
},
"message": {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": [{
"title": "Covid India Stats",
"subtitle": "Get the covid19 stats of Indian states",
"buttons": [{
"type": "web_url",
"url": "https://www.worldometers.info/coronavirus/country/india/",
"title": "Open Worldometer India"
}, {
"type": "postback",
"title": "Get Stats By Indian States",
"payload": C19INDIA,
}],
}]
}
}
}
})
call_send_api(message_data)
def send_state_list(sender_id):
message_data = json.dumps({
"recipient": {
"id": sender_id
},
"message": {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": [{
"title": "Select State",
"buttons": create_state_list(1)
}, {
"title": "Select State",
"buttons": create_state_list(2)
}, {
"title": "Select State",
"buttons": create_state_list(3)
}, {
"title": "Select State",
"buttons": create_state_list(4)
}, {
"title": "Select State",
"buttons": create_state_list(5)
}, {
"title": "Select State",
"buttons": create_state_list(6)
}, {
"title": "Select State",
"buttons": create_state_list(7)
}, {
"title": "Select State",
"buttons": create_state_list(8)
}, {
"title": "Select State",
"buttons": create_state_list(9)
}, {
"title": "Select State",
"buttons": create_state_list(10)
}]
}
}
}
})
call_send_api(message_data)
def create_state_list(index):
state_list = ["Maharashtra", "Kerala", "Karnataka", "Andhra Pradesh", "Tamil Nadu", "Delhi", "Uttar Pradesh",
"West Bengal", "Odisha", "Rajasthan", "Chhattisgarh", "Telangana", "Haryana", "Gujarat", "Bihar",
"Madhya Pradesh", "Assam", "Punjab", "Jharkhand", "Uttarakhand", "Himachal Pradesh", "Goa", "Tripura",
"Manipur", "<NAME>", "Meghalaya", "Nagaland", "Sikkim", "Mizoram"]
payload_list = []
start_index = 0 + 3 * (index - 1)
end_index = 29 if (start_index + 3) > 29 else (start_index + 3)
for i in range(start_index, end_index):
postback = {}
postback["type"] = "postback"
postback["title"] = state_list[i]
postback["payload"] = state_list[i]
payload_list.append(postback)
return payload_list
def get_stats_send(sender_id, state):
response = json.loads(requests.get(
"https://api.covid19india.org/data.json").text)
list_state = response['statewise']
for i in list_state:
if i['state'] == state:
x = i
break
message_data = json.dumps({
"recipient": {
"id": sender_id
},
"message": {
"text": "ACTIVE CASES: {}\nCONFIRMED CASES: {}\nDEATHS: {}\nRECOVERED: {}".format(x['active'],
x['confirmed'],
x['deaths'],
x['recovered'])
}
})
call_send_api(message_data)
def process_postback(event):
sender_id = event["sender"]["id"]
payload = event["postback"]["payload"]
if payload == C19INDIA:
send_state_list(sender_id)
else:
get_stats_send(sender_id, payload)
def call_send_api(message_data):
params = {
"access_token": ACCESS_TOKEN
}
headers = {
"Content-Type": "application/json"
}
r = requests.post("https://graph.facebook.com/v5.0/me/messages",
params=params, headers=headers, data=message_data)
if __name__ == "__main__":
app.run()
| [
"flask.request.args.get",
"requests.post",
"flask.Flask",
"json.dumps",
"requests.get",
"flask.request.get_json"
] | [((318, 333), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (323, 333), False, 'from flask import Flask, request\n'), ((469, 505), 'flask.request.args.get', 'request.args.get', (['"""hub.verify_token"""'], {}), "('hub.verify_token')\n", (485, 505), False, 'from flask import Flask, request\n'), ((708, 726), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (724, 726), False, 'from flask import Flask, request\n'), ((1505, 1986), 'json.dumps', 'json.dumps', (["{'recipient': {'id': sender_id}, 'message': {'attachment': {'type':\n 'template', 'payload': {'template_type': 'generic', 'elements': [{\n 'title': 'Covid India Stats', 'subtitle':\n 'Get the covid19 stats of Indian states', 'buttons': [{'type':\n 'web_url', 'url':\n 'https://www.worldometers.info/coronavirus/country/india/', 'title':\n 'Open Worldometer India'}, {'type': 'postback', 'title':\n 'Get Stats By Indian States', 'payload': C19INDIA}]}]}}}}"], {}), "({'recipient': {'id': sender_id}, 'message': {'attachment': {\n 'type': 'template', 'payload': {'template_type': 'generic', 'elements':\n [{'title': 'Covid India Stats', 'subtitle':\n 'Get the covid19 stats of Indian states', 'buttons': [{'type':\n 'web_url', 'url':\n 'https://www.worldometers.info/coronavirus/country/india/', 'title':\n 'Open Worldometer India'}, {'type': 'postback', 'title':\n 'Get Stats By Indian States', 'payload': C19INDIA}]}]}}}})\n", (1515, 1986), False, 'import json\n'), ((6294, 6409), 'requests.post', 'requests.post', (['"""https://graph.facebook.com/v5.0/me/messages"""'], {'params': 'params', 'headers': 'headers', 'data': 'message_data'}), "('https://graph.facebook.com/v5.0/me/messages', params=params,\n headers=headers, data=message_data)\n", (6307, 6409), False, 'import requests\n'), ((564, 597), 'flask.request.args.get', 'request.args.get', (['"""hub.challenge"""'], {}), "('hub.challenge')\n", (580, 597), False, 'from flask import Flask, request\n'), ((5097, 5151), 'requests.get', 'requests.get', (['"""https://api.covid19india.org/data.json"""'], {}), "('https://api.covid19india.org/data.json')\n", (5109, 5151), False, 'import requests\n')] |
import os
from pathlib import Path
from django.apps import apps as django_apps
from django.conf import settings
from django.core.management.base import BaseCommand, CommandParser, DjangoHelpFormatter
from django.db.models import Model
from rich.align import Align
from rich.bar import Bar
from rich.console import Console
from rich.padding import Padding
from rich.style import Style
from rich.table import Table
from ._field_attr_utils import (
get_field_column,
get_field_db_type,
get_field_name,
get_field_name_on_reverse_model,
get_field_type,
get_field_type_on_reverse_model,
get_field_verbose_name,
get_related_model,
get_related_name,
)
from ._info_classes import FieldOther, FieldRelation, FieldReverseRelation, Method, ModelInfo
from ._method_attr_utils import get_method_docstring, get_method_file, get_method_line_number, get_method_signature
from ._model_attr_utils import (
get_model_base_manager,
get_model_database_table,
get_model_default_manager,
get_model_docstring,
get_model_file,
get_model_is_abstract,
get_model_is_managed,
get_model_is_proxy,
get_model_line_number,
get_model_name,
get_model_verbose_name,
)
console = Console(record=True)
DEFAULT_DJANGO_METHODS = (
"_check_column_name_clashes",
"_check_constraints",
"_check_default_pk",
"_check_field_name_clashes",
"_check_fields",
"_check_id_field",
"_check_index_together",
"_check_indexes",
"_check_local_fields",
"_check_long_column_names",
"_check_m2m_through_same_relationship",
"_check_managers",
"_check_model",
"_check_model_name_db_lookup_clashes",
"_check_ordering",
"_check_property_name_related_field_accessor_clashes",
"_check_single_primary_key",
"_check_swappable",
"_check_unique_together",
"_do_insert",
"_do_update",
"_get_expr_references",
"_get_FIELD_display",
"_get_next_or_previous_by_FIELD",
"_get_next_or_previous_in_order",
"_get_pk_val",
"_get_unique_checks",
"_meta",
"_perform_date_checks",
"_perform_unique_checks",
"_prepare_related_fields_for_save",
"_save_parents",
"_save_table",
"_set_pk_val",
"check",
"clean",
"clean_fields",
"date_error_message",
"delete",
"from_db",
"full_clean",
"get_absolute_url",
"get_deferred_fields",
"prepare_database_save",
"refresh_from_db",
"save",
"save_base",
"serializable_value",
"unique_error_message",
"validate_unique",
)
class Command(BaseCommand):
"""
A management command which lists models within your project, and optionally, details about model fields and methods
Verbosity outputs:
0 Model names only - Convenient when you just need a list of all your project's models in one place
1 Model names, field names, and non-dunder/common method names
2 * Model names, field names & details, and non-dunder/common method names & details
3 Model names, field names & details, and all method names & full details
* Verbosity of 2 is default
"""
help = "List out the fields and methods for each model"
def create_parser(self, prog_name, subcommand, **kwargs):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
Reimplemented to allow new default verbosity of 2
"""
parser = CommandParser(
prog="%s %s" % (os.path.basename(prog_name), subcommand),
description=self.help or None,
formatter_class=DjangoHelpFormatter,
missing_args_message=getattr(self, "missing_args_message", None),
called_from_command_line=getattr(self, "_called_from_command_line", None),
**kwargs,
)
parser.add_argument("--version", action="version", version=self.get_version())
parser.add_argument(
"--settings",
help=(
"The Python path to a settings module, e.g. "
'"myproject.settings.main". If this isn\'t provided, the '
"DJANGO_SETTINGS_MODULE environment variable will be used."
),
)
parser.add_argument(
"--pythonpath",
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".',
)
parser.add_argument("--traceback", action="store_true", help="Raise on CommandError exceptions")
parser.add_argument(
"--no-color",
action="store_true",
help="Don't colorize the command output.",
)
parser.add_argument(
"--force-color",
action="store_true",
help="Force colorization of the command output.",
)
if self.requires_system_checks:
parser.add_argument(
"--skip-checks",
action="store_true",
help="Skip system checks.",
)
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"-v",
"--verbosity",
default=2,
type=int,
choices=[0, 1, 2, 3],
help="Verbosity level: "
"0 Model names only - Convenient when you just need a list of all your project's models in one place, "
"1 Model names + field names +non-dunder/common method names, "
"2 (default) Model names + field names & details + non-dunder/common method names & details, "
"3 Model names + field names & details + all method names & details.",
)
parser.add_argument(
"-e",
"--export",
nargs="?",
type=str,
default=None,
help="Filename to export. The filename must have a file extension of `.txt`, `.html`, or `htm`",
)
parser.add_argument(
"-f",
"--filter",
nargs="+",
type=str,
default=None,
help="Provide one or more apps or models, to which the results will be limited. "
"Input should be in the form `appname` or `appname.Modelname`.",
)
def model_info(self, options):
section_style = Style(color="green", bold=True, underline=True)
subsection_style = Style(color="green", bold=True)
def get_options() -> tuple:
VERBOSITY = options.get("verbosity", None)
if VERBOSITY is None:
VERBOSITY = (
getattr(settings, "MODEL_INFO_VERBOSITY", 2)
if type(getattr(settings, "MODEL_INFO_VERBOSITY", 2)) is int
else 2
)
FILTER = options.get("filter", None)
if FILTER is None:
FILTER = (
getattr(settings, "MODEL_INFO_FILTER", None)
if type(getattr(settings, "MODEL_INFO_FILTER", None)) is list
else None
)
FILENAME = (
options.get("export")
if options.get("export", None) is not None and type(options.get("export", None)) is str
else None
)
return VERBOSITY, FILTER, FILENAME
VERBOSITY, FILTER, FILENAME = get_options()
def build_model_objects(model) -> ModelInfo:
"""
Given a model, returns a ModelInfo object
"""
new_model = ModelInfo()
new_model.model_name.value = get_model_name(model)
new_model.verbose_name.value = get_model_verbose_name(model)
new_model.docstring.value = get_model_docstring(model)
new_model.is_abstract.value = get_model_is_abstract(model)
new_model.is_proxy.value = get_model_is_proxy(model)
new_model.is_managed.value = get_model_is_managed(model)
new_model.database_table.value = get_model_database_table(model)
new_model.base_manager.value = get_model_base_manager(model)
new_model.default_manager.value = get_model_default_manager(model)
new_model.file.value = get_model_file(model)
new_model.line_number.value = get_model_line_number(model)
return new_model
def build_field_objects(field_list: list) -> tuple:
"""
Given a list of model fields, returns a tuple of FieldRelation,
FieldReverseRelation, and FieldOther object lists
"""
fields_relation = []
fields_reverse_relation = []
fields_other = []
for field in field_list:
# Identify the kind of field this is, and build associated object
if hasattr(field, "related_model") and field.related_model is not None:
if "reverse_related" in field.__class__.__module__.__str__():
# Build a FieldReverseRelation object
new_field = FieldReverseRelation()
new_field.name = get_related_name(field)
new_field.field_type = get_field_type(field)
new_field.field_db_type = get_field_db_type(field)
new_field.related_model = get_related_model(field)
new_field.field_name_on_related_model = get_field_name_on_reverse_model(field)
new_field.field_type_on_related_model = get_field_type_on_reverse_model(field)
fields_reverse_relation.append(new_field)
else:
# Build a FieldRelation object
new_field = FieldRelation()
new_field.name = get_field_name(field)
new_field.field_type = get_field_type(field)
new_field.field_column = get_field_column(field)
new_field.field_db_type = get_field_db_type(field)
new_field.related_model = get_related_model(field)
new_field.related_name = get_related_name(field)
fields_relation.append(new_field)
else:
# Build a FieldOther object
new_field = FieldOther()
new_field.name = get_field_name(field)
new_field.field_type = get_field_type(field)
new_field.field_column = get_field_column(field)
new_field.field_db_type = get_field_db_type(field)
new_field.field_verbose_name = get_field_verbose_name(field)
fields_other.append(new_field)
return (
fields_relation,
fields_reverse_relation,
fields_other,
)
def build_method_objects(method_list: list, model: Model) -> tuple:
"""
Given a list of model methods, returns a tuple of MethodCommonDjango,
MethodDunder, MethodOther, MethodOtherPrivate object lists
"""
method_dunder = []
method_common_django = []
method_other_private = []
method_other = []
for method in method_list:
# Build the object, and assign to the correct list
new_method = Method()
new_method.name = method
if VERBOSITY > 1:
new_method.method_signature = get_method_signature(method, model, VERBOSITY)
if VERBOSITY > 2:
new_method.method_docstring = get_method_docstring(method, model)
new_method.method_file = get_method_file(method, model)
new_method.method_line_number = get_method_line_number(method, model)
if method.startswith("__") and method.endswith("__"):
# Dunder methods
method_dunder.append(new_method)
elif method in DEFAULT_DJANGO_METHODS:
# Common Django methods
method_common_django.append(new_method)
elif method.startswith("_"):
# Other Private methods
method_other_private.append(new_method)
else:
# Other methods
method_other.append(new_method)
return (
method_dunder,
method_common_django,
method_other_private,
method_other,
)
def _fill_table(info_table: Table, info_object_list: list or None, info_type: type, column_count: int):
"""
Given a rich table, a list of
"""
if isinstance(info_object_list, list) and all(isinstance(row, info_type) for row in info_object_list):
sorted_field_object_list = sorted(info_object_list, key=lambda x: x.name)
for row in sorted_field_object_list:
if VERBOSITY >= 2:
info_table.add_row(*row.render_row(column_count=column_count))
else:
info_table.add_row(*row.render_simple_row())
else:
info_table.add_row("none")
return info_table
def _print_table(table):
console.print(Padding(table, (1, 0, 0, 8)))
def render_model_table(info_object_list: list or None, info_type: type):
"""Provided a list of FieldRelation objects, prints the resulting sorted table to console"""
model_table = Table(title="Model Details")
row_count = 2
if VERBOSITY > 1:
row_count = 5
if VERBOSITY > 2:
row_count = 11
model_table.add_column("Key", justify="left", style="blue", no_wrap=True)
model_table.add_column("Value", justify="left", style="magenta")
if isinstance(info_object_list, ModelInfo):
for row in info_object_list.render_rows(row_count):
new_row = tuple(row)
model_table.add_row(new_row[0], new_row[1])
else:
model_table.add_row("none")
_print_table(model_table)
def render_field_relations_table(info_object_list: list or None, info_type: type):
"""Provided a list of FieldRelation objects, prints the resulting sorted table to console"""
field_table = Table(title="Relations")
column_count = 1
field_table.add_column("Field Name", justify="left", style="yellow", no_wrap=True)
if VERBOSITY >= 2:
column_count = 6
field_table.add_column("Field Type", justify="left", style="magenta")
field_table.add_column("Database Column", justify="left", style="magenta")
field_table.add_column("Database Type", justify="left", style="magenta")
field_table.add_column("Related Model", justify="right", style="dark_red")
field_table.add_column("Related Name", justify="right", style="dark_red")
field_table = _fill_table(field_table, info_object_list, info_type, column_count)
_print_table(field_table)
def render_field_reverse_relations_table(info_object_list: list or None, info_type: type):
"""Provided a list of FieldReverseRelation objects, prints the resulting sorted table to console"""
field_table = Table(title="Reverse Relations")
column_count = 1
field_table.add_column("Related Name", justify="left", style="yellow", no_wrap=True)
if VERBOSITY >= 2:
column_count = 7
field_table.add_column("Field Type", justify="left", style="magenta")
field_table.add_column("Database Type", justify="left", style="magenta")
field_table.add_column("Related Model", justify="right", style="dark_red")
field_table.add_column("Field Name on Related Model", justify="left", style="dark_red")
field_table.add_column("Field Type on Related Model", justify="left", style="dark_red")
field_table = _fill_table(field_table, info_object_list, info_type, column_count)
_print_table(field_table)
def render_field_others_table(info_object_list: list or None, info_type: type):
"""Provided a list of FieldOther objects, prints the resulting sorted table to console"""
field_table = Table(title="Other Fields")
column_count = 1
field_table.add_column("Field Name", justify="left", style="yellow", no_wrap=True)
if VERBOSITY >= 2:
column_count = 6
field_table.add_column("Field Type", justify="left", style="magenta")
field_table.add_column("Database Column", justify="left", style="magenta")
field_table.add_column("Database Type", justify="left", style="magenta")
field_table.add_column("Verbose Name", justify="left", style="white")
field_table = _fill_table(field_table, info_object_list, info_type, column_count)
_print_table(field_table)
def render_method_table(info_object_list: list or None, info_type: str):
"""Provided a list of Method objects, prints the resulting sorted table to console"""
method_table = Table(title=info_type)
column_count = 1
method_table.add_column("Method Name", justify="left", style="cyan", no_wrap=True)
if VERBOSITY > 1:
column_count = 2
method_table.add_column("Signature", justify="left", style="magenta")
if VERBOSITY > 2:
column_count = 5
method_table.add_column("Docstring", justify="left", style="magenta")
method_table.add_column("File", justify="left", style="magenta")
method_table.add_column("Line Number", justify="left", style="magenta")
method_table = _fill_table(method_table, info_object_list, Method, column_count)
_print_table(method_table)
def get_model_list():
if FILTER is not None:
model_list = []
for filter_item in FILTER:
if filter_item.count(".") == 0:
# Get the models and add to the list
# model_list.append(django_apps.get_app_config(filter_item).get_models())
try:
app_models = [x for x in django_apps.get_app_config(filter_item).get_models()]
except LookupError as e:
print(f"Error while looking up `{filter_item}`: {e}")
else:
model_list.extend(app_models)
elif filter_item.count(".") == 1:
# Add to the model list
try:
filter_model = django_apps.get_model(filter_item)
except LookupError as e:
print(f"Error while looking up `{filter_item}`: {e}")
else:
model_list.append(filter_model)
else:
model_list = sorted(
django_apps.get_models(), key=lambda x: (x._meta.app_label, x._meta.object_name), reverse=False
)
return model_list
model_list = get_model_list()
for model in model_list:
if VERBOSITY > 0:
console.print(Padding("", (1, 0, 0, 0)))
console.print(Padding("", (0, 0, 0, 0), style=section_style))
console.print(Padding("", (0, 0, 0, 0)))
console.print(f"{model._meta.label}", style=section_style)
if VERBOSITY > 0:
def process_model():
build_model_objects(model)
model_info = build_model_objects(model)
render_model_table(model_info, list)
process_model()
def process_fields():
console.print(Padding("Fields:", (1, 0, 0, 4), style=subsection_style))
field_list = model._meta.get_fields(include_hidden=True)
fields_relation, fields_reverse_relation, fields_other = build_field_objects(field_list)
render_field_relations_table(fields_relation, FieldRelation)
render_field_reverse_relations_table(fields_reverse_relation, FieldReverseRelation)
render_field_others_table(fields_other, FieldOther)
process_fields()
def get_clean_method_list():
"""
Remove any potential method names that start with an uppercase character, are blank, or not callable
"""
return [
method_name
for method_name in dir(model)
if method_name is not None
and not method_name == ""
and not method_name[0].isupper()
and hasattr(model, method_name)
and callable(getattr(model, method_name))
]
method_list = get_clean_method_list()
def process_methods():
if VERBOSITY == 3:
console.print(Padding("Methods (all):", (1, 0, 0, 4), style=subsection_style))
else:
console.print(Padding("Methods (non-private/internal):", (1, 0, 0, 4), style=subsection_style))
method_dunder, method_common_django, method_other_private, method_other = build_method_objects(
method_list, model
)
if VERBOSITY > 1:
render_method_table(method_dunder, "Dunder Methods")
render_method_table(method_common_django, "Common Django Methods")
render_method_table(method_other_private, "Other Private methods")
render_method_table(method_other, "Other Methods")
process_methods()
self.stdout.write("\n")
console.print(f"\nTotal Models Listed: {len(model_list)}\n", style=section_style)
console.print(Align(Bar(size=0.1, begin=0.0, end=0.0, width=100), align="center"), style="red")
def process_export():
"""If a FILENAME was provided in options, try to save the appropriate type of file"""
if FILENAME is not None:
extension = Path(FILENAME).suffixes
if len(extension) > 0:
if any(x in extension[-1] for x in ["htm", "html"]):
console.save_html(path=FILENAME)
# Using print() to avoid exporting following line
print(f"Saved as {FILENAME}")
elif "txt" in extension[-1]:
console.save_text(path=FILENAME)
# Using print() to avoid exporting following line
print(f"Saved as {FILENAME}")
process_export()
def handle(self, *args, **options):
self.model_info(options)
| [
"rich.style.Style",
"django.apps.apps.get_models",
"pathlib.Path",
"rich.table.Table",
"rich.console.Console",
"django.apps.apps.get_app_config",
"os.path.basename",
"rich.bar.Bar",
"rich.padding.Padding",
"django.apps.apps.get_model"
] | [((1224, 1244), 'rich.console.Console', 'Console', ([], {'record': '(True)'}), '(record=True)\n', (1231, 1244), False, 'from rich.console import Console\n'), ((6402, 6449), 'rich.style.Style', 'Style', ([], {'color': '"""green"""', 'bold': '(True)', 'underline': '(True)'}), "(color='green', bold=True, underline=True)\n", (6407, 6449), False, 'from rich.style import Style\n'), ((6477, 6508), 'rich.style.Style', 'Style', ([], {'color': '"""green"""', 'bold': '(True)'}), "(color='green', bold=True)\n", (6482, 6508), False, 'from rich.style import Style\n'), ((13837, 13865), 'rich.table.Table', 'Table', ([], {'title': '"""Model Details"""'}), "(title='Model Details')\n", (13842, 13865), False, 'from rich.table import Table\n'), ((14732, 14756), 'rich.table.Table', 'Table', ([], {'title': '"""Relations"""'}), "(title='Relations')\n", (14737, 14756), False, 'from rich.table import Table\n'), ((15764, 15796), 'rich.table.Table', 'Table', ([], {'title': '"""Reverse Relations"""'}), "(title='Reverse Relations')\n", (15769, 15796), False, 'from rich.table import Table\n'), ((16812, 16839), 'rich.table.Table', 'Table', ([], {'title': '"""Other Fields"""'}), "(title='Other Fields')\n", (16817, 16839), False, 'from rich.table import Table\n'), ((17721, 17743), 'rich.table.Table', 'Table', ([], {'title': 'info_type'}), '(title=info_type)\n', (17726, 17743), False, 'from rich.table import Table\n'), ((13594, 13622), 'rich.padding.Padding', 'Padding', (['table', '(1, 0, 0, 8)'], {}), '(table, (1, 0, 0, 8))\n', (13601, 13622), False, 'from rich.padding import Padding\n'), ((22829, 22873), 'rich.bar.Bar', 'Bar', ([], {'size': '(0.1)', 'begin': '(0.0)', 'end': '(0.0)', 'width': '(100)'}), '(size=0.1, begin=0.0, end=0.0, width=100)\n', (22832, 22873), False, 'from rich.bar import Bar\n'), ((19681, 19705), 'django.apps.apps.get_models', 'django_apps.get_models', ([], {}), '()\n', (19703, 19705), True, 'from django.apps import apps as django_apps\n'), ((19958, 19983), 'rich.padding.Padding', 'Padding', (['""""""', '(1, 0, 0, 0)'], {}), "('', (1, 0, 0, 0))\n", (19965, 19983), False, 'from rich.padding import Padding\n'), ((20015, 20061), 'rich.padding.Padding', 'Padding', (['""""""', '(0, 0, 0, 0)'], {'style': 'section_style'}), "('', (0, 0, 0, 0), style=section_style)\n", (20022, 20061), False, 'from rich.padding import Padding\n'), ((20093, 20118), 'rich.padding.Padding', 'Padding', (['""""""', '(0, 0, 0, 0)'], {}), "('', (0, 0, 0, 0))\n", (20100, 20118), False, 'from rich.padding import Padding\n'), ((23099, 23113), 'pathlib.Path', 'Path', (['FILENAME'], {}), '(FILENAME)\n', (23103, 23113), False, 'from pathlib import Path\n'), ((3526, 3553), 'os.path.basename', 'os.path.basename', (['prog_name'], {}), '(prog_name)\n', (3542, 3553), False, 'import os\n'), ((20530, 20586), 'rich.padding.Padding', 'Padding', (['"""Fields:"""', '(1, 0, 0, 4)'], {'style': 'subsection_style'}), "('Fields:', (1, 0, 0, 4), style=subsection_style)\n", (20537, 20586), False, 'from rich.padding import Padding\n'), ((21876, 21939), 'rich.padding.Padding', 'Padding', (['"""Methods (all):"""', '(1, 0, 0, 4)'], {'style': 'subsection_style'}), "('Methods (all):', (1, 0, 0, 4), style=subsection_style)\n", (21883, 21939), False, 'from rich.padding import Padding\n'), ((22005, 22090), 'rich.padding.Padding', 'Padding', (['"""Methods (non-private/internal):"""', '(1, 0, 0, 4)'], {'style': 'subsection_style'}), "('Methods (non-private/internal):', (1, 0, 0, 4), style=subsection_style\n )\n", (22012, 22090), False, 'from rich.padding import Padding\n'), ((19350, 19384), 'django.apps.apps.get_model', 'django_apps.get_model', (['filter_item'], {}), '(filter_item)\n', (19371, 19384), True, 'from django.apps import apps as django_apps\n'), ((18903, 18942), 'django.apps.apps.get_app_config', 'django_apps.get_app_config', (['filter_item'], {}), '(filter_item)\n', (18929, 18942), True, 'from django.apps import apps as django_apps\n')] |
from django.urls import path
from django.conf.urls import url,include
from . import views
urlpatterns = [
path('',views.indexView,name='index'),
] | [
"django.urls.path"
] | [((108, 147), 'django.urls.path', 'path', (['""""""', 'views.indexView'], {'name': '"""index"""'}), "('', views.indexView, name='index')\n", (112, 147), False, 'from django.urls import path\n')] |
# login.txt should contain address on first line and app specific password on the second
#
# <EMAIL>
# <PASSWORD>
def sendEmail(subject, message_):
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
with open("login.txt") as f:
login = f.read().splitlines()
gmailUser = login[0]
gmailPassword = login[1]
recipient = login[0]
message = message_
msg = MIMEMultipart()
msg['From'] = gmailUser
msg['To'] = recipient
msg['Subject'] = subject
msg.attach(MIMEText(message))
mailServer = smtplib.SMTP('smtp.gmail.com', 587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(gmailUser, gmailPassword)
mailServer.sendmail(gmailUser, recipient, msg.as_string())
mailServer.close() | [
"email.mime.multipart.MIMEMultipart",
"email.mime.text.MIMEText",
"smtplib.SMTP"
] | [((447, 462), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', ([], {}), '()\n', (460, 462), False, 'from email.mime.multipart import MIMEMultipart\n'), ((598, 633), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.gmail.com"""', '(587)'], {}), "('smtp.gmail.com', 587)\n", (610, 633), False, 'import smtplib\n'), ((561, 578), 'email.mime.text.MIMEText', 'MIMEText', (['message'], {}), '(message)\n', (569, 578), False, 'from email.mime.text import MIMEText\n')] |
from dockit import schema
from dockit import backends
from django.utils import unittest
from mock import Mock, patch
class SimpleSchema(schema.Schema): #TODO make a more complex testcase
charfield = schema.CharField()
class SimpleDocument(schema.Document): #TODO make a more complex testcase
charfield = schema.CharField()
published = schema.BooleanField()
featured = schema.BooleanField()
class BackendTestCase(unittest.TestCase):
backend_name = None
def setUp(self):
self.patchers = list()
if self.backend_name not in backends.get_document_backends():
self.skipTest('Backend %s is not enabled' % self.backend_name)
def return_backend_name(*args, **kwargs):
return self.backend_name
mock = Mock(side_effect=return_backend_name)
self.patchers.append(patch.object(backends.DOCUMENT_ROUTER, 'get_storage_name_for_read', mock))
self.patchers.append(patch.object(backends.DOCUMENT_ROUTER, 'get_storage_name_for_write', mock))
self.patchers.append(patch.object(backends.INDEX_ROUTER, 'get_index_name_for_read', mock))
self.patchers.append(patch.object(backends.INDEX_ROUTER, 'get_index_name_for_write', mock))
self.mock_classes = list()
for patcher in self.patchers:
self.mock_classes.append(patcher.start())
def tearDown(self):
for patcher in self.patchers:
patcher.stop()
| [
"dockit.backends.get_document_backends",
"mock.Mock",
"mock.patch.object",
"dockit.schema.CharField",
"dockit.schema.BooleanField"
] | [((206, 224), 'dockit.schema.CharField', 'schema.CharField', ([], {}), '()\n', (222, 224), False, 'from dockit import schema\n'), ((316, 334), 'dockit.schema.CharField', 'schema.CharField', ([], {}), '()\n', (332, 334), False, 'from dockit import schema\n'), ((351, 372), 'dockit.schema.BooleanField', 'schema.BooleanField', ([], {}), '()\n', (370, 372), False, 'from dockit import schema\n'), ((388, 409), 'dockit.schema.BooleanField', 'schema.BooleanField', ([], {}), '()\n', (407, 409), False, 'from dockit import schema\n'), ((808, 845), 'mock.Mock', 'Mock', ([], {'side_effect': 'return_backend_name'}), '(side_effect=return_backend_name)\n', (812, 845), False, 'from mock import Mock, patch\n'), ((579, 611), 'dockit.backends.get_document_backends', 'backends.get_document_backends', ([], {}), '()\n', (609, 611), False, 'from dockit import backends\n'), ((875, 948), 'mock.patch.object', 'patch.object', (['backends.DOCUMENT_ROUTER', '"""get_storage_name_for_read"""', 'mock'], {}), "(backends.DOCUMENT_ROUTER, 'get_storage_name_for_read', mock)\n", (887, 948), False, 'from mock import Mock, patch\n'), ((979, 1053), 'mock.patch.object', 'patch.object', (['backends.DOCUMENT_ROUTER', '"""get_storage_name_for_write"""', 'mock'], {}), "(backends.DOCUMENT_ROUTER, 'get_storage_name_for_write', mock)\n", (991, 1053), False, 'from mock import Mock, patch\n'), ((1093, 1161), 'mock.patch.object', 'patch.object', (['backends.INDEX_ROUTER', '"""get_index_name_for_read"""', 'mock'], {}), "(backends.INDEX_ROUTER, 'get_index_name_for_read', mock)\n", (1105, 1161), False, 'from mock import Mock, patch\n'), ((1192, 1261), 'mock.patch.object', 'patch.object', (['backends.INDEX_ROUTER', '"""get_index_name_for_write"""', 'mock'], {}), "(backends.INDEX_ROUTER, 'get_index_name_for_write', mock)\n", (1204, 1261), False, 'from mock import Mock, patch\n')] |
'''
Vidbull urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
class VidbullResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "vidbull"
domains = ["vidbull.com"]
pattern = '//((?:www.)?vidbull.com)/(?:embed-)?([0-9a-zA-Z]+)'
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
headers = {
'User-Agent': common.IOS_USER_AGENT
}
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url, headers=headers).content
match = re.search('<source\s+src="([^"]+)', html)
if match:
return match.group(1)
else:
raise UrlResolver.ResolverError('File Link Not Found')
def get_url(self, host, media_id):
return 'http://www.vidbull.com/%s' % media_id
def get_host_and_id(self, url):
r = re.search(self.pattern,url)
if r:
return r.groups()
else:
return False
return('host', 'media_id')
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return (re.search(self.pattern, url) or 'vidbull' in host)
| [
"re.search",
"urlresolver.plugnplay.interfaces.UrlResolver.ResolverError",
"t0mm0.common.net.Net"
] | [((1257, 1262), 't0mm0.common.net.Net', 'Net', ([], {}), '()\n', (1260, 1262), False, 'from t0mm0.common.net import Net\n'), ((1541, 1583), 're.search', 're.search', (['"""<source\\\\s+src="([^"]+)"""', 'html'], {}), '(\'<source\\\\s+src="([^"]+)\', html)\n', (1550, 1583), False, 'import re\n'), ((1859, 1887), 're.search', 're.search', (['self.pattern', 'url'], {}), '(self.pattern, url)\n', (1868, 1887), False, 'import re\n'), ((1667, 1715), 'urlresolver.plugnplay.interfaces.UrlResolver.ResolverError', 'UrlResolver.ResolverError', (['"""File Link Not Found"""'], {}), "('File Link Not Found')\n", (1692, 1715), False, 'from urlresolver.plugnplay.interfaces import UrlResolver\n'), ((2122, 2150), 're.search', 're.search', (['self.pattern', 'url'], {}), '(self.pattern, url)\n', (2131, 2150), False, 'import re\n')] |
from .utils import inverse as _inverse, gcd as _gcd
import itertools as _itertools
import re as _re
import copy as _copy
def affine_encrypt(msg, k, b):
res = ''
for c in msg:
if c.isalpha() == False:
res += c
continue
t = ord('A') if c.isupper() else ord('a')
res += chr((k * (ord(c) - t) + b) % 26 + t)
return res
def affine_decrypt(cipher, k, b):
res = ''
for c in cipher:
if c.isalpha() == False:
res += c
continue
t = ord('A') if c.isupper() else ord('a')
res += chr(((ord(c) - t) - b) * _inverse(k, 26) % 26 + t)
return res
def substitude_encrypt(msg, key):
res = ''
key = key.lower()
for c in msg:
if c.isalpha() == False:
res += c
continue
t = 0
if c.isupper():
t = ord('a') - ord('A')
res += key[ord(c.lower()) - ord('a') - t]
return res
def substitude_decrypt(cipher, key):
res = ''
key = key.lower()
for c in cipher:
if c.isalpha() == False:
res += c
continue
t = 0
if c.isupper():
t = ord('a') - ord('A')
res += chr(key.index(c.lower()) + ord('a') - t)
return res
def vigenere_encrypt(msg, key):
res = ''
key = key.lower()
it_key = _itertools.cycle(key)
for c in msg:
if c.isalpha() == False:
res += c
continue
k = next(it_key)
if c.isupper():
t = ord('A')
if c.islower():
t = ord('a')
c = ord(c) - t
res += chr((c + ord(k) - ord('a')) % 26 + t)
return res
def vigenere_decrypt(msg, key):
res = ''
key = key.lower()
it_key = _itertools.cycle(key)
for c in msg:
if c.isalpha() == False:
res += c
continue
k = next(it_key)
if c.isupper():
t = ord('A')
if c.islower():
t = ord('a')
c = ord(c) - t
res += chr((c - (ord(k) - ord('a'))) % 26 + t)
return res
def vermam_encrypt(msg, key):
cipher = bytes([m^ord(k) for m, k in zip(msg, _itertools.cycle(key))])
return cipher
def fence_encrypt(msg, key):
res = ''
length = len(msg)
for i in range(key):
for j in range(i, length, key):
res += msg[j]
return res
def fence_decrypt(msg, key):
res = ''
length = len(msg)
fenthlen = length // key + (0 if length % key == 0 else 1) # 向上取整
for i in range(fenthlen):
for j in range(i, length, fenthlen):
res += msg[j]
return res
def hill_calc(block, key):
X = []
Y = [0] * len(key)
for i in range(len(block)):
X.append(ord(block[i]) - ord('a'))
res = ''
for i in range(len(block)):
for j in range(len(block)):
Y[i] += X[j] * key[j][i]
res += chr(Y[i] % 26 + ord('a'))
return res
def matrix_invmod(mat, modulo):
line = len(mat)
extra_mat = [ ([0] * i + [1] + [0] * (line - i - 1)) for i in range(line) ]
for i in range(line):
k = i
for j in range(i + 1, line):
if mat[j][i] > mat[k][i]:
k = j
if mat[k][i] == 0:
return -1
div = mat[k][i]
mat[i], mat[k] = mat[k], mat[i]
extra_mat[i], extra_mat[k] = extra_mat[k], extra_mat[i]
if _gcd(div, modulo) != 1:
for j in range(i + 1, line):
if _gcd(div - mat[j][i], modulo) == 1:
for k in range(line):
mat[i][k] -= mat[j][k]
extra_mat[i][k] -= extra_mat[j][k]
div -= mat[j][i]
break
if _gcd(div, modulo) != 1:
return -1
for j in range(line):
mat[i][j] *= _inverse(div, modulo)
mat[i][j] %= modulo
extra_mat[i][j] *= _inverse(div, modulo)
extra_mat[i][j] %= modulo
for j in range(line):
if j == i:
continue
div = mat[j][i]
for k in range(line):
mat[j][k] -= mat[i][k] * div
mat[j][k] %= 26
extra_mat[j][k] -= extra_mat[i][k] * div
extra_mat[j][k] %= 26
return extra_mat
def hill_encrypt(msg, key):
res = ''
msg = msg.lower()
cryptlen = len(key)
fill = cryptlen - (len(msg) % cryptlen)
msg = _re.findall('.{' + str(cryptlen) + '}', msg + fill * chr(ord('a') + fill)) # 末尾填充并分组,由于 mod 26的限制,矩阵大小需要在 25 * 25 以内
for block in msg:
res += hill_calc(block, key)
return res
def hill_decrypt(msg, key):
de_key = matrix_invmod(key, 26)
res = ''
cryptlen = len(key)
msg = _re.findall('.{' + str(cryptlen) + '}', msg)
for block in msg:
res += hill_calc(block, de_key)
fill = ord(res[-1]) - ord('a') # 删除末尾填充
return res[:(-fill)]
def calc_variance(list_a, list_b):
sum = 0
for i in range(len(list_a)):
sum += ((list_a[i] - list_b[i]) ** 2) / list_b[i]
return sum
def attack_caesar(list):
true_rate = [8.167, 1.492, 2.782, 4.253, 12.702, 2.228, 2.015, 6.094, 6.996, 0.153, 0.772, 4.025, 2.406, 6.749, 7.507, 1.929, 0.095, 5.987, 6.327, 9.056, 2.758, 0.978, 2.360, 0.150, 1.974, 0.074]
res = []
for i in range(26):
res.append(calc_variance(list, true_rate))
list = list[1:] + list[:1]
return res
def mat_mult(mat_a, mat_b):
size_x = len(mat_a)
size_y = len(mat_b[0])
size_z = len(mat_b)
mat_res = [[0 for i in range(size_x)] for j in range(size_y)]
for i in range(size_x):
for j in range(size_y):
for k in range(size_z):
mat_res[i][j] += mat_a[i][k] * mat_b[k][j]
mat_res[i][j] %= 26
return mat_res
def attack_hill(plain, cipher, block_len):
plain_block = _re.findall('.{' + str(block_len) + '}', plain)
cipher_block = _re.findall('.{' + str(block_len) + '}', cipher)
res = False
for i, j in zip(_itertools.permutations(plain_block, block_len), _itertools.permutations(cipher_block, block_len)):
list_i = [list(k) for k in i]
list_j = [list(k) for k in j]
for len_i in range(len(list_i)):
for len_j in range(len(list_i[len_i])):
list_i[len_i][len_j] = ord(list_i[len_i][len_j]) - ord('a')
list_j[len_i][len_j] = ord(list_j[len_i][len_j]) - ord('a')
if matrix_invmod(_copy.deepcopy(list_i), 26) != -1:
mat_plain = matrix_invmod(list_i, 26)
mat_cipher = list_j
res = True
break
if res == False:
return 'no answer'
return mat_mult(mat_plain, mat_cipher)
def search_next(list, T):
res = []
for i in range(26):
for j in range(26):
if i == j:
continue
tmp_list = _copy.deepcopy(list) # 需要使用深复制,否则会改变原始值
tmp_list[1][i], tmp_list[1][j] = tmp_list[1][j], tmp_list[1][i]
tmp_list[2] = calc_variance([tmp_list[0][k][0] for k in range(26)], [tmp_list[1][k][0] for k in range(26)])
if tmp_list[2] > list[2]:
res.append(tmp_list)
res.sort(key = lambda tup: tup[2])
return res[:T]
def attack_substitude(test_rate, T):
true_rate = [8.167, 1.492, 2.782, 4.253, 12.702, 2.228, 2.015, 6.094, 6.996, 0.153, 0.772, 4.025, 2.406, 6.749, 7.507, 1.929, 0.095, 5.987, 6.327, 9.056, 2.758, 0.978, 2.360, 0.150, 1.974, 0.074]
true_rate = [[true_rate[i], i] for i in range(len(true_rate))]
test_rate = [[test_rate[i], i] for i in range(len(test_rate))]
true_rate.sort(key = lambda tup: tup[0], reverse = True)
test_rate.sort(key = lambda tup: tup[0], reverse = True)
basic = calc_variance([true_rate[i][0] for i in range(26)], [test_rate[i][0] for i in range(26)])
ans = []
queue = [[true_rate, test_rate, basic]]
while T > 0:
tmp = min(queue, key = lambda tup: tup[2])
queue.remove(tmp)
if len(ans) != 0:
while tmp[-1] == ans[-1][-1]: # 很可能出现重复情况,需要过滤掉
tmp = min(queue, key = lambda tup: tup[2])
queue.remove(tmp)
ans.append([[tmp[0][i][1], tmp[1][i][1]] for i in range(len(tmp[0]))] + [tmp[2]])
queue += search_next(tmp, T)
T = T - 1
return ans
| [
"itertools.permutations",
"itertools.cycle",
"copy.deepcopy"
] | [((1351, 1372), 'itertools.cycle', '_itertools.cycle', (['key'], {}), '(key)\n', (1367, 1372), True, 'import itertools as _itertools\n'), ((1761, 1782), 'itertools.cycle', '_itertools.cycle', (['key'], {}), '(key)\n', (1777, 1782), True, 'import itertools as _itertools\n'), ((6066, 6113), 'itertools.permutations', '_itertools.permutations', (['plain_block', 'block_len'], {}), '(plain_block, block_len)\n', (6089, 6113), True, 'import itertools as _itertools\n'), ((6115, 6163), 'itertools.permutations', '_itertools.permutations', (['cipher_block', 'block_len'], {}), '(cipher_block, block_len)\n', (6138, 6163), True, 'import itertools as _itertools\n'), ((6925, 6945), 'copy.deepcopy', '_copy.deepcopy', (['list'], {}), '(list)\n', (6939, 6945), True, 'import copy as _copy\n'), ((6512, 6534), 'copy.deepcopy', '_copy.deepcopy', (['list_i'], {}), '(list_i)\n', (6526, 6534), True, 'import copy as _copy\n'), ((2174, 2195), 'itertools.cycle', '_itertools.cycle', (['key'], {}), '(key)\n', (2190, 2195), True, 'import itertools as _itertools\n')] |
from pathlib import Path
from discord.ext import commands
from gainsworth.cogs.gainsworth_core import Gainsworth
bot = commands.Bot(command_prefix="$")
def test_logger():
g = Gainsworth(bot)
assert g.logger
def test_logfile():
path = Path("gainsworth_debug.log")
assert path.is_file()
| [
"discord.ext.commands.Bot",
"gainsworth.cogs.gainsworth_core.Gainsworth",
"pathlib.Path"
] | [((122, 154), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""$"""'}), "(command_prefix='$')\n", (134, 154), False, 'from discord.ext import commands\n'), ((184, 199), 'gainsworth.cogs.gainsworth_core.Gainsworth', 'Gainsworth', (['bot'], {}), '(bot)\n', (194, 199), False, 'from gainsworth.cogs.gainsworth_core import Gainsworth\n'), ((253, 281), 'pathlib.Path', 'Path', (['"""gainsworth_debug.log"""'], {}), "('gainsworth_debug.log')\n", (257, 281), False, 'from pathlib import Path\n')] |
from flask import Flask
from celery import Celery
# Create App
app = Flask(__name__)
app.config.from_object('config.Config')
# Create pp return
from ppserver.pixel.models import PPFeedback
pp = PPFeedback()
# Create Celery object
celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
# Create Blueprints
from ppserver.pixel.controllers import pixel
app.register_blueprint(pixel)
import logging
from logging.handlers import RotatingFileHandler
file_handler = RotatingFileHandler(app.config.get('LOG_PATH'), 'a',
1 * 1024 * 1024, 10)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('PixelServer startup')
import views
| [
"celery.Celery",
"ppserver.pixel.models.PPFeedback",
"logging.Formatter",
"flask.Flask"
] | [((71, 86), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (76, 86), False, 'from flask import Flask\n'), ((197, 209), 'ppserver.pixel.models.PPFeedback', 'PPFeedback', ([], {}), '()\n', (207, 209), False, 'from ppserver.pixel.models import PPFeedback\n'), ((243, 299), 'celery.Celery', 'Celery', (['app.name'], {'broker': "app.config['CELERY_BROKER_URL']"}), "(app.name, broker=app.config['CELERY_BROKER_URL'])\n", (249, 299), False, 'from celery import Celery\n'), ((679, 772), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"""'], {}), "(\n '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n", (696, 772), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
"""
/dms/exercisefolder/views_sitemap.py
.. zeigt die Sitemap des aktuellen Lernarchivs an
Django content Management System
<NAME>
<EMAIL>
Die Programme des dms-Systems koennen frei genutzt und den spezifischen
Beduerfnissen entsprechend angepasst werden.
0.01 02.05.2008 Beginn der Arbeit
"""
from django.shortcuts import render_to_response
from django.template import Context
from django.template.loader import get_template
from django.utils.translation import ugettext as _
from dms.queries import get_site_url
from dms.queries import get_container_sitemap
from dms.utils_form import get_folderish_vars_show
from dms.utils_base import show_link
#from dms.roles import *
from dms.edufolder.utils import get_user_support
from dms.edufolder.utils import get_folder_content
from dms.newsboard.utils import get_folder_content as get_newsboard_content
from dms_ext.extension import * # dms-Funktionen ueberschreiben
# -----------------------------------------------------
def exercisefolder_sitemap(request, item_container):
""" zeigt die Sitemap des Lernarchivs """
def get_sitemap(item_container, start, length=200):
""" liefert eine Liste der untergeordneten Lernarchive """
tSitemap = get_template('app/edufolder/sitemap.html')
path_length = len(item_container.container.path)
containers, count = get_container_sitemap(item_container, start, length,
False)
ret = ''
s = get_site_url(item_container, '')
#assert False
for container in containers:
p = container.path[path_length:]
if p != '':
n = p.count('/')
space = n * '| ' + ' '
ret += '%s<a href="%s%sindex.html">%s</a><br />\n' % (space, s, p, p)
# --- -2, weil ./ nicht zaehlt und von 0 an gerechnet wird
if start + length > count:
max = count-2
next = ''
else:
max = start + length
next = show_link('./?start='+str(start+200), _(u'weiter'),
url_class="navLink")
if start > 0:
prev = show_link('./?start='+str(start-200), _(u'zurück'),
url_class="navLink")
else:
prev = ''
section = Context ( { 'start': start,
'max': max,
'count': count-1,
'prev': prev,
'next': next,
'links': ret } )
return tSitemap.render(section)
app_name = u'edufolder'
if request.GET.has_key('start'):
start = int(request.GET['start'])
else:
start = 0
vars = get_folderish_vars_show(request, item_container, app_name,
get_sitemap(item_container, start),
get_user_support(item_container, request.user))
vars['text'] = ''
vars['title'] = _(u'Sitemap <i>dieses</i> Lernarchivs')
vars['image_url'] = ''
vars['slot_right_info'] = ''
return render_to_response ( 'app/base_folderish.html', vars )
| [
"dms.edufolder.utils.get_user_support",
"dms.queries.get_site_url",
"django.template.loader.get_template",
"django.shortcuts.render_to_response",
"django.template.Context",
"django.utils.translation.ugettext",
"dms.queries.get_container_sitemap"
] | [((2880, 2919), 'django.utils.translation.ugettext', '_', (['u"""Sitemap <i>dieses</i> Lernarchivs"""'], {}), "(u'Sitemap <i>dieses</i> Lernarchivs')\n", (2881, 2919), True, 'from django.utils.translation import ugettext as _\n'), ((2985, 3036), 'django.shortcuts.render_to_response', 'render_to_response', (['"""app/base_folderish.html"""', 'vars'], {}), "('app/base_folderish.html', vars)\n", (3003, 3036), False, 'from django.shortcuts import render_to_response\n'), ((1289, 1331), 'django.template.loader.get_template', 'get_template', (['"""app/edufolder/sitemap.html"""'], {}), "('app/edufolder/sitemap.html')\n", (1301, 1331), False, 'from django.template.loader import get_template\n'), ((1409, 1468), 'dms.queries.get_container_sitemap', 'get_container_sitemap', (['item_container', 'start', 'length', '(False)'], {}), '(item_container, start, length, False)\n', (1430, 1468), False, 'from dms.queries import get_container_sitemap\n'), ((1536, 1568), 'dms.queries.get_site_url', 'get_site_url', (['item_container', '""""""'], {}), "(item_container, '')\n", (1548, 1568), False, 'from dms.queries import get_site_url\n'), ((2272, 2375), 'django.template.Context', 'Context', (["{'start': start, 'max': max, 'count': count - 1, 'prev': prev, 'next': next,\n 'links': ret}"], {}), "({'start': start, 'max': max, 'count': count - 1, 'prev': prev,\n 'next': next, 'links': ret})\n", (2279, 2375), False, 'from django.template import Context\n'), ((2794, 2840), 'dms.edufolder.utils.get_user_support', 'get_user_support', (['item_container', 'request.user'], {}), '(item_container, request.user)\n', (2810, 2840), False, 'from dms.edufolder.utils import get_user_support\n'), ((2047, 2059), 'django.utils.translation.ugettext', '_', (['u"""weiter"""'], {}), "(u'weiter')\n", (2048, 2059), True, 'from django.utils.translation import ugettext as _\n'), ((2174, 2186), 'django.utils.translation.ugettext', '_', (['u"""zurück"""'], {}), "(u'zurück')\n", (2175, 2186), True, 'from django.utils.translation import ugettext as _\n')] |
import sys
if sys.version_info >= (2, 7):
import unittest
else:
try:
import unittest2 as unittest
except ImportError:
raise RuntimeError("unittest2 is required for Python < 2.7")
import sys
from twiggy import levels
class LevelTestCase(unittest.TestCase):
def test_display(self):
assert str(levels.DEBUG) == 'DEBUG'
assert repr(levels.DEBUG) == '<LogLevel DEBUG>'
def test_name2level(self):
assert levels.name2level('debug') is levels.DEBUG
assert levels.name2level('Debug') is levels.DEBUG
def test_less_than(self):
assert levels.DEBUG < levels.INFO
assert levels.INFO < levels.NOTICE
assert levels.NOTICE < levels.WARNING
assert levels.WARNING < levels.ERROR
assert levels.ERROR < levels.CRITICAL
assert levels.CRITICAL < levels.DISABLED
def test_less_than_equals(self):
assert levels.DEBUG <= levels.INFO
assert levels.INFO <= levels.NOTICE
assert levels.NOTICE <= levels.WARNING
assert levels.WARNING <= levels.ERROR
assert levels.ERROR <= levels.CRITICAL
assert levels.CRITICAL <= levels.DISABLED
def test_greater_than(self):
assert levels.INFO > levels.DEBUG
assert levels.NOTICE > levels.INFO
assert levels.WARNING > levels.NOTICE
assert levels.ERROR > levels.WARNING
assert levels.CRITICAL > levels.ERROR
assert levels.DISABLED > levels.CRITICAL
def test_greater_than_equals(self):
assert levels.INFO >= levels.DEBUG
assert levels.NOTICE >= levels.INFO
assert levels.WARNING >= levels.NOTICE
assert levels.ERROR >= levels.WARNING
assert levels.CRITICAL >= levels.ERROR
assert levels.DISABLED >= levels.CRITICAL
def test_equality(self):
assert levels.DEBUG == levels.DEBUG
assert levels.INFO == levels.INFO
assert levels.NOTICE == levels.NOTICE
assert levels.WARNING == levels.WARNING
assert levels.ERROR == levels.ERROR
assert levels.CRITICAL == levels.CRITICAL
def test_inequality(self):
assert not levels.DEBUG != levels.DEBUG
assert not levels.INFO != levels.INFO
assert not levels.NOTICE != levels.NOTICE
assert not levels.WARNING != levels.WARNING
assert not levels.ERROR != levels.ERROR
assert not levels.CRITICAL != levels.CRITICAL
assert levels.INFO != levels.DEBUG
assert levels.NOTICE != levels.WARNING
assert levels.WARNING != levels.NOTICE
assert levels.ERROR != levels.WARNING
assert levels.CRITICAL != levels.ERROR
assert levels.DISABLED != levels.CRITICAL
def test_dict_key(self):
d={levels.DEBUG:42}
assert d[levels.DEBUG] == 42
def test_bogus_not_equals(self):
assert levels.DEBUG != 1
@unittest.skipIf(sys.version_info < (3,), "Python 2.x comparisons are insane")
def test_bogus_compare(self):
# XXX is there a comparable test for 2.x?
with self.assertRaises(TypeError):
levels.DEBUG < 42
| [
"unittest2.skipIf",
"twiggy.levels.name2level"
] | [((2891, 2968), 'unittest2.skipIf', 'unittest.skipIf', (['(sys.version_info < (3,))', '"""Python 2.x comparisons are insane"""'], {}), "(sys.version_info < (3,), 'Python 2.x comparisons are insane')\n", (2906, 2968), True, 'import unittest2 as unittest\n'), ((463, 489), 'twiggy.levels.name2level', 'levels.name2level', (['"""debug"""'], {}), "('debug')\n", (480, 489), False, 'from twiggy import levels\n'), ((521, 547), 'twiggy.levels.name2level', 'levels.name2level', (['"""Debug"""'], {}), "('Debug')\n", (538, 547), False, 'from twiggy import levels\n')] |
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from flask import Flask, render_template, request, jsonify, flash, redirect, url_for
import pickle
from FaceDetector_v7 import EmotionFacePredictor
import json
from werkzeug.utils import secure_filename
import tensorflow as tf
global graph,model
graph = tf.get_default_graph()
plt.ioff()
app = Flask(__name__)
home = os.getcwd()
# home = '/home/ubuntu/efc/src/'
cv2_path = './'
bestmodelfilepath = './CNN_cont.hdf5'
efp = EmotionFacePredictor(home, cv2_path, bestmodelfilepath)
efp.run_setup()
UPLOAD_FOLDER = 'static/images/'
print(UPLOAD_FOLDER)
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
if not os.path.isdir(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
# prevent cached responses
@app.after_request
def add_header(r):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
# if user does not select file, browser also
# submit a empty part without filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('predict',
filename=filename))
return render_template('form/index.html')
@app.route('/predict', methods=['GET','POST'])
def predict():
"""Recieve the article to be classified from an input form and use the
model to classify.
"""
response = request.args.get('filename')
image = os.path.join(app.config['UPLOAD_FOLDER'], response)
print(image)
with graph.as_default():
results = efp.classify_faces_image(image)
print("RESULTS HERE:")
print(results)
if not results:
print("NOTHING TO SEE HERE")
return render_template('form/no_face_found.html', response=image)
# else:
print("Found some faces!!")
top_emotions = [efp.emo_list[x[0]] for x in results[1]]
return render_template('form/predict.html',
orig_img=image,
faces = results[0],
top_emos = top_emotions)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| [
"flask.render_template",
"flask.request.args.get",
"flask.flash",
"os.makedirs",
"flask.Flask",
"matplotlib.use",
"os.path.join",
"matplotlib.pyplot.ioff",
"os.getcwd",
"flask.redirect",
"flask.url_for",
"os.path.isdir",
"werkzeug.utils.secure_filename",
"FaceDetector_v7.EmotionFacePredict... | [((29, 50), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (43, 50), False, 'import matplotlib\n'), ((339, 361), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (359, 361), True, 'import tensorflow as tf\n'), ((362, 372), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (370, 372), True, 'import matplotlib.pyplot as plt\n'), ((380, 395), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (385, 395), False, 'from flask import Flask, render_template, request, jsonify, flash, redirect, url_for\n'), ((404, 415), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (413, 415), False, 'import os\n'), ((509, 564), 'FaceDetector_v7.EmotionFacePredictor', 'EmotionFacePredictor', (['home', 'cv2_path', 'bestmodelfilepath'], {}), '(home, cv2_path, bestmodelfilepath)\n', (529, 564), False, 'from FaceDetector_v7 import EmotionFacePredictor\n'), ((700, 728), 'os.path.isdir', 'os.path.isdir', (['UPLOAD_FOLDER'], {}), '(UPLOAD_FOLDER)\n', (713, 728), False, 'import os\n'), ((734, 760), 'os.makedirs', 'os.makedirs', (['UPLOAD_FOLDER'], {}), '(UPLOAD_FOLDER)\n', (745, 760), False, 'import os\n'), ((2201, 2235), 'flask.render_template', 'render_template', (['"""form/index.html"""'], {}), "('form/index.html')\n", (2216, 2235), False, 'from flask import Flask, render_template, request, jsonify, flash, redirect, url_for\n'), ((2420, 2448), 'flask.request.args.get', 'request.args.get', (['"""filename"""'], {}), "('filename')\n", (2436, 2448), False, 'from flask import Flask, render_template, request, jsonify, flash, redirect, url_for\n'), ((2461, 2512), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'response'], {}), "(app.config['UPLOAD_FOLDER'], response)\n", (2473, 2512), False, 'import os\n'), ((2901, 2998), 'flask.render_template', 'render_template', (['"""form/predict.html"""'], {'orig_img': 'image', 'faces': 'results[0]', 'top_emos': 'top_emotions'}), "('form/predict.html', orig_img=image, faces=results[0],\n top_emos=top_emotions)\n", (2916, 2998), False, 'from flask import Flask, render_template, request, jsonify, flash, redirect, url_for\n'), ((2727, 2785), 'flask.render_template', 'render_template', (['"""form/no_face_found.html"""'], {'response': 'image'}), "('form/no_face_found.html', response=image)\n", (2742, 2785), False, 'from flask import Flask, render_template, request, jsonify, flash, redirect, url_for\n'), ((1598, 1619), 'flask.flash', 'flash', (['"""No file part"""'], {}), "('No file part')\n", (1603, 1619), False, 'from flask import Flask, render_template, request, jsonify, flash, redirect, url_for\n'), ((1639, 1660), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (1647, 1660), False, 'from flask import Flask, render_template, request, jsonify, flash, redirect, url_for\n'), ((1842, 1867), 'flask.flash', 'flash', (['"""No selected file"""'], {}), "('No selected file')\n", (1847, 1867), False, 'from flask import Flask, render_template, request, jsonify, flash, redirect, url_for\n'), ((1887, 1908), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (1895, 1908), False, 'from flask import Flask, render_template, request, jsonify, flash, redirect, url_for\n'), ((1981, 2011), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (1996, 2011), False, 'from werkzeug.utils import secure_filename\n'), ((2034, 2085), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_FOLDER'], filename)\n", (2046, 2085), False, 'import os\n'), ((2115, 2152), 'flask.url_for', 'url_for', (['"""predict"""'], {'filename': 'filename'}), "('predict', filename=filename)\n", (2122, 2152), False, 'from flask import Flask, render_template, request, jsonify, flash, redirect, url_for\n')] |
""" Hook decorator. """
import logging
from eris.decorators import BaseDecorator
from eris.events.hooks import Hook as EventHook
LOGGER = logging.getLogger(__name__)
class Hook(BaseDecorator):
""" Hook decorator, used for more easily setting up handlers for messages. """
hook: EventHook = None
def __init__(self, name: str, event_type: str, contains: str = '', match_criteria: str = 'all'):
""" Create a hook using this function, can be applied multiple times.
:param name: Name of the hook, used for documentation purposes and debugging.
:param event_type: Which type of event this is a hook for (e.g., message)
:param contains: Possible payload string to filter messages for
:param match_criteria: Which criteria to match the hook against (any or all)
"""
self.hook = EventHook(name=name, _type=event_type, contains=contains, match_criteria=match_criteria)
def __call__(self, func):
self.hook.callback = func
if hasattr(func, 'hooks'):
func.hooks.append(self.hook)
else:
func.hooks = [self.hook]
return func
| [
"logging.getLogger",
"eris.events.hooks.Hook"
] | [((140, 167), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (157, 167), False, 'import logging\n'), ((847, 940), 'eris.events.hooks.Hook', 'EventHook', ([], {'name': 'name', '_type': 'event_type', 'contains': 'contains', 'match_criteria': 'match_criteria'}), '(name=name, _type=event_type, contains=contains, match_criteria=\n match_criteria)\n', (856, 940), True, 'from eris.events.hooks import Hook as EventHook\n')] |
#!/usr/bin/python
from colorama import init, Fore, Back, Style
init(autoreset=True)
print(Fore.RED + 'some red text')
print(Fore.GREEN + 'some green text')
print(Fore.BLUE + 'some blue text')
print(Fore.CYAN + 'some cyan text')
print(Fore.MAGENTA + 'some magenta text')
print(Back.GREEN + 'and with a green background')
print(Style.DIM + 'and in dim text')
print(Style.BRIGHT + Fore.GREEN + 'and green color in bright text')
print('automatically back to default color again')
# These are available color
"""
Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
Style: DIM, NORMAL, BRIGHT, RESET_ALL
"""
| [
"colorama.init"
] | [((64, 84), 'colorama.init', 'init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (68, 84), False, 'from colorama import init, Fore, Back, Style\n')] |
from .conftest import TestTimeouts
from ftplib import FTP
from socket import timeout
class TestFtplib(TestTimeouts):
def test_connect(self):
with self.raises(timeout):
with FTP(self.connect_host(), timeout=1) as ftp:
ftp.login()
def test_read(self):
with self.raises(timeout):
with FTP(timeout=1) as ftp:
ftp.connect(self.read_host(), self.read_port())
ftp.login()
| [
"ftplib.FTP"
] | [((349, 363), 'ftplib.FTP', 'FTP', ([], {'timeout': '(1)'}), '(timeout=1)\n', (352, 363), False, 'from ftplib import FTP\n')] |
from aoi_envs.MultiAgent import MultiAgentEnv
import numpy as np
class MobileEnv(MultiAgentEnv):
def __init__(self, agent_velocity=1.0, initialization='Random', biased_velocities=False, flocking=False,
random_acceleration=True, aoi_reward=True, flocking_position_control=False, num_agents=40):
super().__init__(eavesdropping=True, fractional_power_levels=[0.25, 0.0], initialization=initialization,
aoi_reward=aoi_reward, num_agents=num_agents)
self.ts_length = 0.1
self.max_velocity = agent_velocity * self.distance_scale / self.ts_length / self.episode_length
self.max_acceleration = 10.0
self.gain = 1.0
self.recompute_solution = True
self.mobile_agents = True
self.flocking = flocking
self.flocking_position_control = flocking_position_control
self.random_acceleration = random_acceleration
self.biased_velocities = biased_velocities
def reset(self):
super().reset()
if self.random_acceleration or (self.flocking and not self.biased_velocities):
self.x[:, 2:4] = np.random.uniform(-self.max_velocity, self.max_velocity, size=(self.n_agents, 2))
elif self.flocking and self.biased_velocities:
self.x[:, 2:4] = np.random.uniform(0.5 * -self.max_velocity, 0.5 * self.max_velocity, size=(self.n_agents, 2))
self.x[:, 2:4] = self.x[:, 2:4] + np.random.uniform(0.5 * -self.max_velocity, 0.5 * self.max_velocity, size=(1, 2))
else:
angle = np.pi * np.random.uniform(0, 2, size=(self.n_agents,))
self.x[:, 2] = self.max_velocity * np.cos(angle)
self.x[:, 3] = self.max_velocity * np.sin(angle)
self.network_buffer[:, :, 4:6] = np.where(self.diag,
self.x[:, 2:4].reshape(self.n_agents, 1, 2),
self.network_buffer[:, :, 4:6])
return self.get_relative_network_buffer_as_dict()
def step(self, attempted_transmissions):
self.move_agents()
return super().step(attempted_transmissions)
def potential_grad(self, pos_diff, r2):
grad = -2.0 * np.divide(pos_diff, np.multiply(r2, r2)) + 2 * np.divide(pos_diff, r2)
return grad
def move_agents(self):
new_pos = self.x[:, 0:2] + self.x[:, 2:4] * self.ts_length
if self.flocking or self.random_acceleration:
if self.flocking:
known_velocities = np.copy(self.network_buffer[:, :, 4:6])
known_velocities[known_velocities == 0] = np.nan
known_velocities -= (self.x[:, 2:4])[:, np.newaxis, :]
acceleration = np.nanmean(known_velocities, axis=1)
if self.flocking_position_control:
steady_state_scale = self.r_max / 5.0
known_positions = np.copy(self.network_buffer[:, :, 2:4])
known_positions[known_positions == 0] = np.nan
known_positions = (known_positions - (self.x[:, 2:4])[:, np.newaxis, :]) / steady_state_scale
r2 = np.sum(known_positions ** 2, axis=2)[:, :, np.newaxis]
grad = -2.0 * np.divide(known_positions, np.multiply(r2, r2)) + 2 * np.divide(known_positions, r2)
acceleration += np.nansum(grad, axis=1) * steady_state_scale
else:
# acceleration = np.random.uniform(-self.max_acceleration, self.max_acceleration, size=(self.n_agents, 2))
acceleration = np.random.normal(0., self.max_acceleration / 3.0, size=(self.n_agents, 2))
acceleration = np.clip(acceleration, -self.max_acceleration, self.max_acceleration)
self.x[:, 2:4] += self.gain * self.ts_length * acceleration
self.x[:, 2:4] = np.clip(self.x[:, 2:4], -self.max_velocity, self.max_velocity)
if self.flocking:
self.x[:, 0:2] = new_pos
else:
self.x[:, 0:2] = np.clip(new_pos[:, 0:2], -self.r_max, self.r_max)
self.x[:, 2:4] = np.where((self.x[:, 0:2] - new_pos[:, 0:2]) == 0, self.x[:, 2:4], -self.x[:, 2:4])
self.network_buffer[:, :, 2:6] = np.where(self.diag,
self.x[:, 0:4].reshape(self.n_agents, 1, 4),
self.network_buffer[:, :, 2:6])
| [
"numpy.clip",
"numpy.copy",
"numpy.random.normal",
"numpy.multiply",
"numpy.where",
"numpy.nanmean",
"numpy.sum",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin",
"numpy.nansum",
"numpy.divide"
] | [((1140, 1226), 'numpy.random.uniform', 'np.random.uniform', (['(-self.max_velocity)', 'self.max_velocity'], {'size': '(self.n_agents, 2)'}), '(-self.max_velocity, self.max_velocity, size=(self.\n n_agents, 2))\n', (1157, 1226), True, 'import numpy as np\n'), ((3705, 3773), 'numpy.clip', 'np.clip', (['acceleration', '(-self.max_acceleration)', 'self.max_acceleration'], {}), '(acceleration, -self.max_acceleration, self.max_acceleration)\n', (3712, 3773), True, 'import numpy as np\n'), ((3875, 3937), 'numpy.clip', 'np.clip', (['self.x[:, 2:4]', '(-self.max_velocity)', 'self.max_velocity'], {}), '(self.x[:, 2:4], -self.max_velocity, self.max_velocity)\n', (3882, 3937), True, 'import numpy as np\n'), ((4045, 4094), 'numpy.clip', 'np.clip', (['new_pos[:, 0:2]', '(-self.r_max)', 'self.r_max'], {}), '(new_pos[:, 0:2], -self.r_max, self.r_max)\n', (4052, 4094), True, 'import numpy as np\n'), ((4124, 4209), 'numpy.where', 'np.where', (['(self.x[:, 0:2] - new_pos[:, 0:2] == 0)', 'self.x[:, 2:4]', '(-self.x[:, 2:4])'], {}), '(self.x[:, 0:2] - new_pos[:, 0:2] == 0, self.x[:, 2:4], -self.x[:, 2:4]\n )\n', (4132, 4209), True, 'import numpy as np\n'), ((1306, 1404), 'numpy.random.uniform', 'np.random.uniform', (['(0.5 * -self.max_velocity)', '(0.5 * self.max_velocity)'], {'size': '(self.n_agents, 2)'}), '(0.5 * -self.max_velocity, 0.5 * self.max_velocity, size=(\n self.n_agents, 2))\n', (1323, 1404), True, 'import numpy as np\n'), ((2277, 2300), 'numpy.divide', 'np.divide', (['pos_diff', 'r2'], {}), '(pos_diff, r2)\n', (2286, 2300), True, 'import numpy as np\n'), ((2536, 2575), 'numpy.copy', 'np.copy', (['self.network_buffer[:, :, 4:6]'], {}), '(self.network_buffer[:, :, 4:6])\n', (2543, 2575), True, 'import numpy as np\n'), ((2743, 2779), 'numpy.nanmean', 'np.nanmean', (['known_velocities'], {'axis': '(1)'}), '(known_velocities, axis=1)\n', (2753, 2779), True, 'import numpy as np\n'), ((3602, 3677), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(self.max_acceleration / 3.0)'], {'size': '(self.n_agents, 2)'}), '(0.0, self.max_acceleration / 3.0, size=(self.n_agents, 2))\n', (3618, 3677), True, 'import numpy as np\n'), ((1446, 1532), 'numpy.random.uniform', 'np.random.uniform', (['(0.5 * -self.max_velocity)', '(0.5 * self.max_velocity)'], {'size': '(1, 2)'}), '(0.5 * -self.max_velocity, 0.5 * self.max_velocity, size=(\n 1, 2))\n', (1463, 1532), True, 'import numpy as np\n'), ((1570, 1616), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2)'], {'size': '(self.n_agents,)'}), '(0, 2, size=(self.n_agents,))\n', (1587, 1616), True, 'import numpy as np\n'), ((1664, 1677), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1670, 1677), True, 'import numpy as np\n'), ((1725, 1738), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1731, 1738), True, 'import numpy as np\n'), ((2250, 2269), 'numpy.multiply', 'np.multiply', (['r2', 'r2'], {}), '(r2, r2)\n', (2261, 2269), True, 'import numpy as np\n'), ((2928, 2967), 'numpy.copy', 'np.copy', (['self.network_buffer[:, :, 2:4]'], {}), '(self.network_buffer[:, :, 2:4])\n', (2935, 2967), True, 'import numpy as np\n'), ((3174, 3210), 'numpy.sum', 'np.sum', (['(known_positions ** 2)'], {'axis': '(2)'}), '(known_positions ** 2, axis=2)\n', (3180, 3210), True, 'import numpy as np\n'), ((3384, 3407), 'numpy.nansum', 'np.nansum', (['grad'], {'axis': '(1)'}), '(grad, axis=1)\n', (3393, 3407), True, 'import numpy as np\n'), ((3317, 3347), 'numpy.divide', 'np.divide', (['known_positions', 'r2'], {}), '(known_positions, r2)\n', (3326, 3347), True, 'import numpy as np\n'), ((3290, 3309), 'numpy.multiply', 'np.multiply', (['r2', 'r2'], {}), '(r2, r2)\n', (3301, 3309), True, 'import numpy as np\n')] |
from unittest.mock import MagicMock
import pytest
from datastore.shared.di import injector
from datastore.shared.services import EnvironmentService, ShutdownService
from datastore.writer.redis_backend.connection_handler import ConnectionHandler
from datastore.writer.redis_backend.redis_connection_handler import (
RedisConnectionHandlerService,
)
from tests import reset_di # noqa
@pytest.fixture(autouse=True)
def provide_di(reset_di): # noqa
injector.register(ShutdownService, ShutdownService)
injector.register(EnvironmentService, EnvironmentService)
injector.register(ConnectionHandler, RedisConnectionHandlerService)
yield
@pytest.fixture()
def connection(provide_di):
yield injector.get(ConnectionHandler)
def test_xadd_empty_arguments(connection):
connection.ensure_connection = ec = MagicMock()
connection.xadd(None, None)
ec.assert_not_called()
def test_shutdown(connection):
connection.connection = c = MagicMock()
connection.shutdown()
c.close.assert_called()
assert connection.connection is None
| [
"pytest.fixture",
"datastore.shared.di.injector.get",
"unittest.mock.MagicMock",
"datastore.shared.di.injector.register"
] | [((392, 420), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (406, 420), False, 'import pytest\n'), ((658, 674), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (672, 674), False, 'import pytest\n'), ((459, 510), 'datastore.shared.di.injector.register', 'injector.register', (['ShutdownService', 'ShutdownService'], {}), '(ShutdownService, ShutdownService)\n', (476, 510), False, 'from datastore.shared.di import injector\n'), ((515, 572), 'datastore.shared.di.injector.register', 'injector.register', (['EnvironmentService', 'EnvironmentService'], {}), '(EnvironmentService, EnvironmentService)\n', (532, 572), False, 'from datastore.shared.di import injector\n'), ((577, 644), 'datastore.shared.di.injector.register', 'injector.register', (['ConnectionHandler', 'RedisConnectionHandlerService'], {}), '(ConnectionHandler, RedisConnectionHandlerService)\n', (594, 644), False, 'from datastore.shared.di import injector\n'), ((830, 841), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (839, 841), False, 'from unittest.mock import MagicMock\n'), ((966, 977), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (975, 977), False, 'from unittest.mock import MagicMock\n'), ((713, 744), 'datastore.shared.di.injector.get', 'injector.get', (['ConnectionHandler'], {}), '(ConnectionHandler)\n', (725, 744), False, 'from datastore.shared.di import injector\n')] |
# Copyright (c) 2015 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Function to infer the germline sequences of a set of chains, given their IMGT nucleotide analysis.
# Each gene is reverted to its germline and aligned with the chain. Other elements (P, N) are assumed not to have
# mutated from the germline. This hypothesis should be checked by examining other members of the clonal
# family.
#
# sequence_file is the name of an IMGT nucleotide analysis csv or tab separated file containing analyses of the sequences
# germline_lib is the name of the germline library to use (single file in IMGT format). The IMGT germline library
# for all species is downloadable at http://www.imgt.org/download/GENE-DB/IMGTGENEDB-ReferenceSequences.fasta-nt-WithoutGaps-F+ORF+inframeP
# species_name is the species name as used in the germline file, e.g. "Homo sapiens"
# output_file is the name of the output file.
# option is any combination of:
# f - list 'full' germlines where genes are reverted and ns preserved
# v - list germlines where just the v-gene is reverted and other regions gapped
# j - list germlines where all genes are reverted and n-regions gapped
# i - list input sequences
# o - list derived germline for each input sequence
# c - list consensus for derived germlines
# x - write verbose analysis through the report functio
# fixed_mut - if this parameter is nonzero, the function will list V-gene mutations from germline that are seen in all
# sequences of a particular germline, for all germlines where there are at least fixed_mut sequences.
# in all cases, output sequences are aligned on full codon boundaries and incomplete codons are gapped out
# report is a function called whenever there is status info to report
# error is a function called when there is a (catastrophic) error
__author__ = '<NAME>'
__docformat__ = "restructuredtext en"
import sys
import csv
import traceback
from Germlib import Germlib
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import AlignInfo
from Bio.Align import MultipleSeqAlignment
def germline_from_imgt(sequence_file, germline_lib, species_name, output_file, option, report, error, fixed_mut):
"""
Test cases mainly for Mutation Analysis:
Identical sequences with varying lengths at the 5' end
>>> germline_from_imgt("Testfiles/alleles/varyinglengths.txt", "Testfiles/alleles/imgt_germlines.fasta", "Homo sapiens", "Testfiles/alleles/varyinglengths_out.fasta", "v", doctest_report, doctest_report, 3)
Processing successfully completed.
Mutation Analysis:
IGHV4-34*01 (5 sequences):
Common mutations: c67g, g88c, t96g, a97c, g103c, t158a, a165g, g166c, c179t, c180g, c181t, g182t, a189g, t207c, c208a, a209c, g210a, a212t, c221t, a229g, g230a, a241g, c248t, t249g, g273a, t274a, g275a, t278c, a280t
germline: caggtgcagctacagcagtggggcgcaggactgttgaagccttcggagaccctgtccctcacctgcgctgtctatggtgggtccttcagtggttactactggagctggatccgccagcccccagggaaggggctggagtggattggggaaatcaatcatagtggaagcaccaactacaacccgtccctcaagagtcgagtcaccatatcagtagacacgtccaagaaccagttctccctgaagctgagctctgtgaccgccgcggacacggctgtgtattactgtgcgagagg
consensus: .........ctacagcagtggggcgcaggactgttgaagccttcggagaccctgtccctcacctgcgGtgtctatggtgggtccttcaCtggttacGCctggaCctggatccgccagcccccagggaaggggctggagtggattggggaaatcaatcaAagtggaGCcaccaactacaaTGTTtccctcGagagtcgagtcaccataCACAtTgacacgtcTaagaaccGAttctccctgaGgctgagTGctgtgaccgccgcggacacggctAAAtaCtTctgtgcgagagg
Identical sequences with one common deletion and an adjacent deletion that is not present in all sequences
>>> germline_from_imgt("Testfiles/alleles/somedeletions.txt", "Testfiles/alleles/imgt_germlines.fasta", "Homo sapiens", "Testfiles/alleles/somedeletions_out.fasta", "v", doctest_report, doctest_report, 3)
Processing successfully completed.
Mutation Analysis:
IGHV4-34*01 (5 sequences):
Common deletions: 156, 157, 158
Common mutations: c67g, g88c, t96g, a97c, g103c, a165g, g166c, c179t, c180g, c181t, g182t, a189g, t207c, c208a, a209c, g210a, a212t, c221t, a229g, g230a, a241g, c248t, t249g, g273a, t274a, g275a, t278c, a280t
germline: caggtgcagctacagcagtggggcgcaggactgttgaagccttcggagaccctgtccctcacctgcgctgtctatggtgggtccttcagtggttactactggagctggatccgccagcccccagggaaggggctggagtggattggggaaatcaatcatagtggaagcaccaactacaacccgtccctcaagagtcgagtcaccatatcagtagacacgtccaagaaccagttctccctgaagctgagctctgtgaccgccgcggacacggctgtgtattactgtgcgagagg
consensus: ...gtgcagctacagcagtggggcgcaggactgttgaagccttcggagaccctgtccctcacctgcgGtgtctatggtgggtccttcaCtggttacGCctggaCctggatccgccagcccccagggaaggggctggagtggattggggaaatcXXX---agtggaGCcaccaactacaaTGTTtccctcGagagtcgagtcaccataCACAtTgacacgtcTaagaaccGAttctccctgaGgctgagTGctgtgaccgccgcggacacggctAAAtaCtTctgtgcgagagg
Identical sequences with a common deletion
>>> germline_from_imgt("Testfiles/alleles/deletions.txt", "Testfiles/alleles/imgt_germlines.fasta", "Homo sapiens", "Testfiles/alleles/deletions_out.fasta", "v", doctest_report, doctest_report, 3)
Processing successfully completed.
Mutation Analysis:
IGHV4-34*01 (5 sequences):
Common deletions: 153, 154, 155, 156, 157, 158
Common mutations: c67g, g88c, t96g, a97c, g103c, a165g, g166c, c179t, c180g, c181t, g182t, a189g, t207c, c208a, a209c, g210a, a212t, c221t, a229g, g230a, a241g, c248t, t249g, g273a, t274a, g275a, t278c, a280t
germline: caggtgcagctacagcagtggggcgcaggactgttgaagccttcggagaccctgtccctcacctgcgctgtctatggtgggtccttcagtggttactactggagctggatccgccagcccccagggaaggggctggagtggattggggaaatcaatcatagtggaagcaccaactacaacccgtccctcaagagtcgagtcaccatatcagtagacacgtccaagaaccagttctccctgaagctgagctctgtgaccgccgcggacacggctgtgtattactgtgcgagagg
consensus: ...gtgcagctacagcagtggggcgcaggactgttgaagccttcggagaccctgtccctcacctgcgGtgtctatggtgggtccttcaCtggttacGCctggaCctggatccgccagcccccagggaaggggctggagtggattggggaaatc------agtggaGCcaccaactacaaTGTTtccctcGagagtcgagtcaccataCACAtTgacacgtcTaagaaccGAttctccctgaGgctgagTGctgtgaccgccgcggacacggctAAAtaCtTctgtgcgagagg
Test with 5 identical sequences
>>> germline_from_imgt("Testfiles/alleles/identical.txt", "Testfiles/alleles/imgt_germlines.fasta", "Homo sapiens", "Testfiles/alleles/identical_out.fasta", "v", doctest_report, doctest_report, 3)
Processing successfully completed.
Mutation Analysis:
IGHV4-34*01 (5 sequences):
Common mutations: c67g, g88c, t96g, a97c, g103c, t158a, a165g, g166c, c179t, c180g, c181t, g182t, a189g, t207c, c208a, a209c, g210a, a212t, c221t, a229g, g230a, a241g, c248t, t249g, g273a, t274a, g275a, t278c, a280t
germline: caggtgcagctacagcagtggggcgcaggactgttgaagccttcggagaccctgtccctcacctgcgctgtctatggtgggtccttcagtggttactactggagctggatccgccagcccccagggaaggggctggagtggattggggaaatcaatcatagtggaagcaccaactacaacccgtccctcaagagtcgagtcaccatatcagtagacacgtccaagaaccagttctccctgaagctgagctctgtgaccgccgcggacacggctgtgtattactgtgcgagagg
consensus: ...gtgcagctacagcagtggggcgcaggactgttgaagccttcggagaccctgtccctcacctgcgGtgtctatggtgggtccttcaCtggttacGCctggaCctggatccgccagcccccagggaaggggctggagtggattggggaaatcaatcaAagtggaGCcaccaactacaaTGTTtccctcGagagtcgagtcaccataCACAtTgacacgtcTaagaaccGAttctccctgaGgctgagTGctgtgaccgccgcggacacggctAAAtaCtTctgtgcgagagg
Test with two mutations removed from the first sequence
>>> germline_from_imgt("Testfiles/alleles/two_dropped.txt", "Testfiles/alleles/imgt_germlines.fasta", "Homo sapiens", "Testfiles/alleles/two_dropped_out.fasta", "v", doctest_report, doctest_report, 3)
Processing successfully completed.
Mutation Analysis:
IGHV4-34*01 (5 sequences):
Common mutations: t96g, a97c, g103c, t158a, a165g, g166c, c179t, c180g, c181t, g182t, a189g, t207c, c208a, a209c, g210a, a212t, c221t, a229g, g230a, a241g, c248t, t249g, g273a, t274a, g275a, t278c, a280t
germline: caggtgcagctacagcagtggggcgcaggactgttgaagccttcggagaccctgtccctcacctgcgctgtctatggtgggtccttcagtggttactactggagctggatccgccagcccccagggaaggggctggagtggattggggaaatcaatcatagtggaagcaccaactacaacccgtccctcaagagtcgagtcaccatatcagtagacacgtccaagaaccagttctccctgaagctgagctctgtgaccgccgcggacacggctgtgtattactgtgcgagagg
consensus: ...gtgcagctacagcagtggggcgcaggactgttgaagccttcggagaccctgtccctcacctgcgXtgtctatggtgggtccttcaXtggttacGCctggaCctggatccgccagcccccagggaaggggctggagtggattggggaaatcaatcaAagtggaGCcaccaactacaaTGTTtccctcGagagtcgagtcaccataCACAtTgacacgtcTaagaaccGAttctccctgaGgctgagTGctgtgaccgccgcggacacggctAAAtaCtTctgtgcgagagg
"""
for char in option:
if char not in 'ciofvjx':
error('unrecognised option: %s.' % char)
return
try:
gl = Germlib(species_name, germline_file=germline_lib)
except:
report("Error parsing germline library file: " + str(sys.exc_info()[1]))
return
consensus_f = []
consensus_v = []
consensus_j = []
mutated_germs = {}
imgt_nt = {}
try:
with open(sequence_file, "r") as sequence_handle:
ln = sequence_handle.readline()
sep = ("\t" if "\t" in ln else ",")
sequence_handle.seek(0)
reader = csv.DictReader(sequence_handle, delimiter=sep)
for row in reader:
imgt_nt[row["Sequence ID"]] = row
outrecs = []
for id, nt_rec in imgt_nt.iteritems():
try:
if "JUNCTION" in nt_rec and nt_rec["JUNCTION"] != None and len(nt_rec["JUNCTION"]) > 0:
heavychain = len(nt_rec["V-D-J-REGION"]) > 0
if heavychain:
mAb = (nt_rec["V-REGION"],
nt_rec.get("P3'V", ""),
nt_rec.get("N-REGION", ""),
nt_rec.get("N1-REGION", ""),
nt_rec.get("P5'D", ""),
nt_rec.get("D-REGION", ""),
nt_rec.get("P3'D", ""),
nt_rec.get("N2-REGION", ""),
nt_rec.get("P5'J", ""),
nt_rec["J-REGION"])
else:
mAb = (nt_rec["V-REGION"],
nt_rec.get("P3'V", ""),
nt_rec.get("N-REGION", ""),
nt_rec.get("P5'J", ""),
nt_rec["J-REGION"])
if 'x' in option:
report("%s:" % id)
report(" | ".join(mAb))
# Revert the part of the V-gene that extends to the second Cysteine
vregion = nt_rec["V-REGION"]
vregion_3prime = nt_rec["3'V-REGION"]
vgene_name = Germlib.translate_imgt_name(nt_rec["V-GENE and allele"])
if len(vregion_3prime) > 0 and vregion[0 - len(vregion_3prime):] != vregion_3prime:
report("Error: 3'V-REGION sequence not found at 3' end of V-REGION in sequence %s" % id)
continue
# Remove stray nucleotides from the 5' end of the V-region to give us whole codons (we know the 3' end is aligned)
vregion_5prime = vregion[:0 - len(vregion_3prime)] if len(vregion_3prime) > 0 else vregion
vregion_5prime = (vregion_5prime if len(vregion_5prime) % 3 == 0 else vregion_5prime[(len(vregion_5prime) % 3):])
try:
vgene_frag1, matchstr_frag1 = gl.match_from_aa(vgene_name, vregion_5prime)
# For the remaining (3') part, use a global alignment. We use the entire V-region so that the 3prime
# region, which might be quite small, aligns against the right part of the sequence
vgene_frag2, matchstr_frag2 = gl.match(vgene_name, vregion)[0 - len(vregion_3prime):] if len(vregion_3prime) > 0 else ("", "")
if fixed_mut > 0:
# Merge the two matchstrings. Starting at the 3' end, we pull matchstring off frag2 until we get beyond vgene_frag2 and are
# about to pull the first nt of vgene_frag1. Then we pull the rest off vgene_frag1.
mlen = 0
matchstr = ""
for m in matchstr_frag2[::-1]:
if m != 'd':
mlen += 1
if mlen > len(vregion_3prime):
break
matchstr += m
skip = True
for m in matchstr_frag1[::-1]:
if skip and m != 'd':
skip = False
if not skip:
matchstr += m
matchstr = matchstr[::-1]
# Sanity check 1 - number of nucleotides in match string should match length of v-region
mlen = sum((n != 'd') for n in matchstr)
if len(vregion_5prime) + len(vregion_3prime) != mlen:
report("Error in match string length for sequence %s" % id)
# Sanity check 2 - check matchstring is consistent
vgene = str(gl.seq(vgene_name).seq)
mismatch = False
gt = iter(vgene)
vt = iter(vregion)
for m in matchstr:
if m == 'd':
next(gt)
elif m == 'i':
next(vt)
elif m == 'm':
if next(gt) != next(vt):
mismatch = True
else:
if next(gt) == next(vt):
mismatch = True
if mismatch:
report("Error in matchstring for sequence %s:\nvgene: %s\nseq : %s\nmatch: %s\n" % (id, vgene, vregion, matchstr))
else:
en = mutated_germs.get(vgene_name, [])
en.append((vregion, matchstr))
mutated_germs[vgene_name] = en
if nt_rec["J-GENE and allele"] != '':
jgene_name = Germlib.translate_imgt_name(nt_rec["J-GENE and allele"])
jgene_frag, _ = gl.match(jgene_name, nt_rec["J-REGION"])
else:
jgene_frag = ''
if heavychain and nt_rec["D-GENE and allele"] != '':
dgene_name = Germlib.translate_imgt_name(nt_rec["D-GENE and allele"])
dgene_frag, _ = gl.match(dgene_name, nt_rec["D-REGION"])
else:
dgene_frag = ''
except:
report("Error processing sequence " + id + ":")
exc_type, exc_value, exc_traceback = sys.exc_info()
report(traceback.format_exception(exc_type, exc_value, exc_traceback, 2))
continue
if heavychain:
germline = [
vgene_frag1 + vgene_frag2,
nt_rec.get("P3'V", ""),
nt_rec.get("N-REGION", ""),
nt_rec.get("N1-REGION", ""),
nt_rec.get("P5'D", ""),
dgene_frag,
nt_rec.get("P3'D", ""),
nt_rec.get("N2-REGION", ""),
nt_rec.get("P5'J", ""),
jgene_frag]
else:
germline = [
vgene_frag1 + vgene_frag2,
nt_rec.get("P3'V", ""),
nt_rec.get("N-REGION", ""),
nt_rec.get("P5'J", ""),
jgene_frag]
jgene_frag = (jgene_frag if len("".join(germline)) % 3 == 0 else jgene_frag[:0-(len("".join(germline)) % 3)])
germline[-1] = jgene_frag
if 'i' in option:
trunc5 = len(vregion) - len(vregion_5prime + vregion_3prime)
if heavychain:
trunc3 = (len(nt_rec["V-D-J-REGION"]) - trunc5) % 3
if trunc3 != 0:
outrecs.append(SeqRecord(Seq(nt_rec["V-D-J-REGION"][trunc5:0-trunc3]), id=id, name=id, description=""))
else:
outrecs.append(SeqRecord(Seq(nt_rec["V-D-J-REGION"][trunc5:]), id=id, name=id, description=""))
else:
trunc3 = (len(nt_rec["V-J-REGION"]) - trunc5) % 3
if trunc3 != 0:
outrecs.append(SeqRecord(Seq(nt_rec["V-J-REGION"][trunc5:0-trunc3]), id=id, name=id, description=""))
else:
outrecs.append(SeqRecord(Seq(nt_rec["V-J-REGION"][trunc5:]), id=id, name=id, description=""))
if 'f' in option:
if 'x' in option:
report("Inferred 'full' germline:")
report(" | ".join(germline))
sr = SeqRecord(Seq("".join(germline)), id=id + "_germ", name=id + "_germ", description="")
consensus_f.append(sr)
if 'o' in option:
outrecs.append(sr)
def chunks(l, n):
""" Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i + n]
germline = "".join(germline)
v_ext = vgene_frag1 + vgene_frag2
if 'v' in option:
g = (v_ext) + '-' * (len(germline) - len(v_ext))
germline_v = ""
for c in chunks(g, 3):
germline_v += c if '-' not in c else '-'*len(c)
if 'x' in option:
report("Inferred germline (v):")
report(germline_v)
sr = SeqRecord(Seq(germline_v), id=id + "_germ_v", name=id + "_germ_v", description="")
consensus_v.append(sr)
if 'o' in option:
outrecs.append(sr)
if 'j' in option:
if heavychain:
g = v_ext + '-' * (
len(nt_rec.get("P3'V", "")) +
len(nt_rec.get("N-REGION", "")) +
len(nt_rec.get("N1-REGION", "")) +
len(nt_rec.get("P5'D", ""))) + \
dgene_frag + \
'-' * (
len(nt_rec.get("P3'D", "")) +
len(nt_rec.get("N2-REGION", "")) +
len(nt_rec.get("P5'J", ""))) + \
jgene_frag
else:
g = v_ext + '-' * (len(germline) - len(v_ext) - len(jgene_frag)) + jgene_frag
germline_vj = ""
for c in chunks(g, 3):
germline_vj += c if '-' not in c else '-'*len(c)
if 'x' in option:
report("Inferred germline_vdj:")
report(germline_vj)
sr = SeqRecord(Seq(germline_vj), id=id + "_germ_vdj", name=id + "_germ_vdj", description="")
consensus_j.append(sr)
if 'o' in option:
outrecs.append(sr)
else:
report("%s: no junction." % id)
except:
report("Error processing input record " + id + ":")
exc_type, exc_value, exc_traceback = sys.exc_info()
report(traceback.format_exception(exc_type, exc_value, exc_traceback, 2))
report("Processing successfully completed.")
except:
report("Error parsing input file: " + str(sys.exc_info()[1]))
return
if 'c' in option:
try:
def checklengths(srs):
length = -1
for sr in srs:
if length < 0:
length = len(sr.seq)
elif len(sr.seq) != length:
report("Length error in sequence %s" % sr.id)
if 'f' in option:
checklengths(consensus_f)
summary = AlignInfo.SummaryInfo(MultipleSeqAlignment(consensus_f))
cd = summary.dumb_consensus(ambiguous="-")
consensus = ""
for c in chunks(cd, 3):
consensus += c if '-' not in c else '-'*len(c)
report("'Full' germline consensus:")
report(str(consensus))
outrecs.insert(0, SeqRecord(consensus, id="consensus_germ_full", name="consensus_germ_full", description=""))
if 'v' in option:
checklengths(consensus_v)
summary = AlignInfo.SummaryInfo(MultipleSeqAlignment(consensus_v))
cd = summary.dumb_consensus(ambiguous="-")
consensus = ""
for c in chunks(cd, 3):
consensus += c if '-' not in c else '-'*len(c)
report("Germline (v) consensus:")
report(str(consensus))
outrecs.insert(0, SeqRecord(consensus, id="consensus_germ_v", name="consensus_germ_v", description=""))
if 'j' in option:
checklengths(consensus_j)
summary = AlignInfo.SummaryInfo(MultipleSeqAlignment(consensus_j))
cd = summary.dumb_consensus(ambiguous="-")
consensus = ""
for c in chunks(cd, 3):
consensus += c if '-' not in c else '-'*len(c)
report("Germline vdj consensus:")
report(str(consensus))
outrecs.insert(0, SeqRecord(consensus, id="consensus_germ_vdj", name="consensus_germ_vdj", description=""))
except:
report("Error generating consensus: %s - %s" % (sys.exc_info()[0], sys.exc_info()[1]))
if fixed_mut > 0:
try:
report("Mutation Analysis, showing mutations, insertions and deletions that are common to all sequences from a given germline.")
report("This will be reported for all germlines for which there are at least %d sequences in the analysis:" % fixed_mut)
def m_limits(m):
# Find the upper and lower limits of the matchstr, ignoring leading and trailing deletions
# limits are expressed as locations relative to the germline (insertions in the matchstr are ignored)
for i in range(len(m)):
if m[i] != 'd':
mstart = i
break
for i in range(len(m)-1, -1, -1):
if m[i] != 'd' and m[i] != 'i':
mend = i
break
loc = 0
for i in range(len(m)):
if i == mstart:
start = loc
elif i == mend:
end = loc
if m[i] != 'i':
loc += 1
return (start, end)
for germline, mg in mutated_germs.iteritems():
if len(mg) >= fixed_mut:
# given that the sequences may have different start and end points, compute
# the range over which we have coverage from a sufficient number of sequences
germseq = gl.seq(germline).seq
coverage = [0] * len(germseq)
for seq, matchstr in mg:
start, end = m_limits(matchstr)
for i in range(start, end+1):
coverage[i] += 1
range_start = 999
range_end = -1
for i, val in enumerate(coverage):
if val >= fixed_mut:
if range_start > i:
range_start = i
if range_end < i:
range_end = i
# matches[loc] holds:
# 'u' if this location has not as yet been observed in sequences processed
# 'm' if it has been observed to match the germline in sequences processed so far
# 'c,g,a,t' if it has been observed to be mutated to that value in sequences processed so far
# 'm' if it has been observed to be deleted in sequences processed so far
# 'x' if if the results at this location are not consistent between sequences
matches = ['u'] * len(germseq)
insertions = []
range_encountered_start = 999
range_encountered_end = -1
for seq, matchstr in mg:
ins = 0
loc = 0
inserts = []
(start, end) = m_limits(matchstr)
start = max(start, range_start)
end = min(end, range_end)
s = iter(seq)
for m in matchstr:
if m != 'i':
ins = 0
if m == 'n':
sub = next(s)
if loc >= start and loc <= end:
if matches[loc] == 'u':
matches[loc] = sub
elif matches[loc] != sub:
matches[loc] = 'x'
loc += 1
elif m == 'd':
if loc >= start and loc <= end:
if matches[loc] == 'u':
matches[loc] = 'd'
elif matches[loc] != 'd':
matches[loc] = 'x'
loc += 1
elif m == 'i':
if loc >= start and loc <= end:
inserts.append((loc, ins))
ins += 1
next(s)
else:
if loc >= start and loc <= end:
if matches[loc] == 'u':
matches[loc] = 'm'
elif matches[loc] != 'm':
matches[loc] = 'x'
loc += 1
next(s)
# Add a new insertion to the consensus list if we see it in this sequence, and it is outside
# the range we've encountered so far.
for loc, ins in inserts:
if loc < range_encountered_start or loc > range_encountered_end:
insertions.append((loc, ins))
# Remove insertions from the consensus list if they are in range of this sequence and were not
# observed in it
for loc, ins in insertions:
if loc >= start and loc <= end:
if not (loc, ins) in inserts:
insertions.remove((loc, ins))
range_encountered_start = min(range_encountered_start, start)
range_encountered_end = max(range_encountered_end, end)
report("%s (%d sequences):" % (germline, len(mg)))
deletions = []
for loc, m in enumerate(matches):
if m == 'd':
deletions.append(loc)
if len(deletions) > 0:
report(" Common deletions: %s" % ', '.join([str(n) for n in sorted(deletions)]))
if len(insertions) > 0:
report(" Common insertions: %s") % ', '.join(["%d.%d" % (loc, ins) for (loc, ins) in sorted(insertions)])
mutations = []
for loc, m in enumerate(matches):
if m in ('c', 'a', 'g', 't'):
mutations.append("%s%d%s" % (germseq[loc], loc, m))
if len(mutations) > 0:
report(" Common mutations: %s" % ', '.join([str(n) for n in mutations]))
if len(insertions) + len(deletions) + len(mutations) > 0:
r_g = ""
gi = iter(germseq)
for m in matches:
r_g += next(gi) if m != 'i' else '-'
report( "germline: %s" % r_g)
r_c = ""
gi = iter(germseq)
for m in matches:
if m == 'm':
r_c += next(gi)
elif m == 'd':
r_c += '-'
next(gi)
elif m == 'i':
r_c += 'i'
elif m == 'u':
r_c += '.'
next(gi)
else:
r_c += m.upper()
next(gi)
report( "consensus: %s" % r_c)
else:
report(" No common insertions, deletions or mutations compared to gertmline")
else:
report("%s (%d sequences) - number of sequences is below analysis threshold." % (germline, len(mg)))
except:
report("Error creating mutation report:")
exc_type, exc_value, exc_traceback = sys.exc_info()
report(traceback.format_exception(exc_type, exc_value, exc_traceback, 2))
SeqIO.write(outrecs, output_file, "fasta")
def doctest_report(x):
print(x)
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"csv.DictReader",
"Germlib.Germlib",
"Bio.SeqRecord.SeqRecord",
"Bio.Seq.Seq",
"traceback.format_exception",
"sys.exc_info",
"doctest.testmod",
"Bio.SeqIO.write",
"Bio.Align.MultipleSeqAlignment",
"Germlib.Germlib.translate_imgt_name"
] | [((33302, 33344), 'Bio.SeqIO.write', 'SeqIO.write', (['outrecs', 'output_file', '"""fasta"""'], {}), "(outrecs, output_file, 'fasta')\n", (33313, 33344), False, 'from Bio import SeqIO\n'), ((33433, 33450), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (33448, 33450), False, 'import doctest\n'), ((9497, 9546), 'Germlib.Germlib', 'Germlib', (['species_name'], {'germline_file': 'germline_lib'}), '(species_name, germline_file=germline_lib)\n', (9504, 9546), False, 'from Germlib import Germlib\n'), ((9980, 10026), 'csv.DictReader', 'csv.DictReader', (['sequence_handle'], {'delimiter': 'sep'}), '(sequence_handle, delimiter=sep)\n', (9994, 10026), False, 'import csv\n'), ((33184, 33198), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (33196, 33198), False, 'import sys\n'), ((11655, 11711), 'Germlib.Germlib.translate_imgt_name', 'Germlib.translate_imgt_name', (["nt_rec['V-GENE and allele']"], {}), "(nt_rec['V-GENE and allele'])\n", (11682, 11711), False, 'from Germlib import Germlib\n'), ((22140, 22154), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (22152, 22154), False, 'import sys\n'), ((22887, 22920), 'Bio.Align.MultipleSeqAlignment', 'MultipleSeqAlignment', (['consensus_f'], {}), '(consensus_f)\n', (22907, 22920), False, 'from Bio.Align import MultipleSeqAlignment\n'), ((23245, 23339), 'Bio.SeqRecord.SeqRecord', 'SeqRecord', (['consensus'], {'id': '"""consensus_germ_full"""', 'name': '"""consensus_germ_full"""', 'description': '""""""'}), "(consensus, id='consensus_germ_full', name='consensus_germ_full',\n description='')\n", (23254, 23339), False, 'from Bio.SeqRecord import SeqRecord\n'), ((23457, 23490), 'Bio.Align.MultipleSeqAlignment', 'MultipleSeqAlignment', (['consensus_v'], {}), '(consensus_v)\n', (23477, 23490), False, 'from Bio.Align import MultipleSeqAlignment\n'), ((23812, 23900), 'Bio.SeqRecord.SeqRecord', 'SeqRecord', (['consensus'], {'id': '"""consensus_germ_v"""', 'name': '"""consensus_germ_v"""', 'description': '""""""'}), "(consensus, id='consensus_germ_v', name='consensus_germ_v',\n description='')\n", (23821, 23900), False, 'from Bio.SeqRecord import SeqRecord\n'), ((24018, 24051), 'Bio.Align.MultipleSeqAlignment', 'MultipleSeqAlignment', (['consensus_j'], {}), '(consensus_j)\n', (24038, 24051), False, 'from Bio.Align import MultipleSeqAlignment\n'), ((24373, 24465), 'Bio.SeqRecord.SeqRecord', 'SeqRecord', (['consensus'], {'id': '"""consensus_germ_vdj"""', 'name': '"""consensus_germ_vdj"""', 'description': '""""""'}), "(consensus, id='consensus_germ_vdj', name='consensus_germ_vdj',\n description='')\n", (24382, 24465), False, 'from Bio.SeqRecord import SeqRecord\n'), ((33218, 33283), 'traceback.format_exception', 'traceback.format_exception', (['exc_type', 'exc_value', 'exc_traceback', '(2)'], {}), '(exc_type, exc_value, exc_traceback, 2)\n', (33244, 33283), False, 'import traceback\n'), ((22178, 22243), 'traceback.format_exception', 'traceback.format_exception', (['exc_type', 'exc_value', 'exc_traceback', '(2)'], {}), '(exc_type, exc_value, exc_traceback, 2)\n', (22204, 22243), False, 'import traceback\n'), ((9620, 9634), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (9632, 9634), False, 'import sys\n'), ((15924, 15980), 'Germlib.Germlib.translate_imgt_name', 'Germlib.translate_imgt_name', (["nt_rec['J-GENE and allele']"], {}), "(nt_rec['J-GENE and allele'])\n", (15951, 15980), False, 'from Germlib import Germlib\n'), ((16267, 16323), 'Germlib.Germlib.translate_imgt_name', 'Germlib.translate_imgt_name', (["nt_rec['D-GENE and allele']"], {}), "(nt_rec['D-GENE and allele'])\n", (16294, 16323), False, 'from Germlib import Germlib\n'), ((16652, 16666), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (16664, 16666), False, 'import sys\n'), ((20238, 20253), 'Bio.Seq.Seq', 'Seq', (['germline_v'], {}), '(germline_v)\n', (20241, 20253), False, 'from Bio.Seq import Seq\n'), ((21703, 21719), 'Bio.Seq.Seq', 'Seq', (['germline_vj'], {}), '(germline_vj)\n', (21706, 21719), False, 'from Bio.Seq import Seq\n'), ((22386, 22400), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (22398, 22400), False, 'import sys\n'), ((16698, 16763), 'traceback.format_exception', 'traceback.format_exception', (['exc_type', 'exc_value', 'exc_traceback', '(2)'], {}), '(exc_type, exc_value, exc_traceback, 2)\n', (16724, 16763), False, 'import traceback\n'), ((24539, 24553), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (24551, 24553), False, 'import sys\n'), ((24558, 24572), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (24570, 24572), False, 'import sys\n'), ((18275, 18321), 'Bio.Seq.Seq', 'Seq', (["nt_rec['V-D-J-REGION'][trunc5:0 - trunc3]"], {}), "(nt_rec['V-D-J-REGION'][trunc5:0 - trunc3])\n", (18278, 18321), False, 'from Bio.Seq import Seq\n'), ((18445, 18481), 'Bio.Seq.Seq', 'Seq', (["nt_rec['V-D-J-REGION'][trunc5:]"], {}), "(nt_rec['V-D-J-REGION'][trunc5:])\n", (18448, 18481), False, 'from Bio.Seq import Seq\n'), ((18725, 18769), 'Bio.Seq.Seq', 'Seq', (["nt_rec['V-J-REGION'][trunc5:0 - trunc3]"], {}), "(nt_rec['V-J-REGION'][trunc5:0 - trunc3])\n", (18728, 18769), False, 'from Bio.Seq import Seq\n'), ((18893, 18927), 'Bio.Seq.Seq', 'Seq', (["nt_rec['V-J-REGION'][trunc5:]"], {}), "(nt_rec['V-J-REGION'][trunc5:])\n", (18896, 18927), False, 'from Bio.Seq import Seq\n')] |
from typing import List
from webbrowser import get
from fastapi import APIRouter, Response
from .schemas import TodoItem, TodoPayload, UserPayload #,User
#-----Agregado jtortolero-----
from sqlalchemy.orm import Session
from fastapi import Depends, HTTPException, status
from .models import Item, User
from .utils import password_hash
from .oauth2 import *
#-----------------------------
router = APIRouter()
@router.get("/items/", response_model=List[TodoItem])
def get_items(db:Session=Depends(get_db)):
"""Retrieve a persistent list of items."""
# TODO: Implement this
items = db.query(Item).filter(Item.title).all()
return items
@router.get("/items/{id}", response_model=TodoItem)
def get_item(id: int, db:Session=Depends(get_db)):
"""Retrieve a particular item from the store."""
# TODO: Implement this.
item = db.query(Item).filter(Item.id==id).first()
if not item:
raise HTTPException(status_code = status.HTTP_404_NOT_FOUND,
detail=f'Item con id={id} no existe')
return item
@router.post(
path="/items/",
response_model=TodoItem,
status_code=201,
response_description="The item has been created successfully.",
)
def create_item(payload: TodoPayload, db:Session=Depends(get_db), current_user:User=Depends(get_current_user)):
"""Add an item to the store."""
# TODO: Implement this.
# Requirements:
# * Ensure an user is authenticated with basic credentials.
# * Add the username to the item.
new_item = Item(user_id=current_user.id, **Item.dict())
db.add(new_item)
db.commit()
db.refresh(new_item)
return new_item
@router.put("/items/{id}", response_model=TodoItem)
def update_item(id: int, payload: TodoPayload, db:Session=Depends(get_db),
current_user: User=Depends(get_current_user)):
# TODO: Implement this.
# * Ensure the user is authenticated. If not, either return a 401 response
# or raise an `HttpException` with a 401 code.
# * Ensure that the item is stored already in the datastore. If not, raise
# an `HttpException` with a 404 code or return a 404 response.
# * Check the username matches the item's username. If not, return a 403
# response or raise a `HttpException` with a 403 code.
# * Apply the update and save it to the database.
item_query = db.query(Item).filter(Item.id == id)
item_update = item_query.first()
if item_update == None:
raise HTTPException(status.HTTP_404_NOT_FOUND,
detail=f"post {id} no fue encontrado")
if item_update.user_id != current_user.id:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
detail="No esta autorizado para realizar esta accion")
item_query.update(payload.dict(), synchronize_session=False)
db.commit()
return item_query.first()
@router.delete("/items/{id}", response_class=Response, status_code=204)
def remove_item(id: int, db:Session=Depends(get_db), current_user: int= Depends(get_current_user)):
# TODO: Implement this
# 1. Check that the item exists in the datastore.
# 2. Ensure the user is authenticated.
# 3. Check if the currently logged username matches.
# 4. Remove the item from the store.
item_query = db.query(Item).filter(Item.id == id)
item = item_query.first()
if item == None:
raise HTTPException(status.HTTP_404_NOT_FOUND,
detail=f"Item {id} no fue encontrado")
if item.user_id != current_user.id:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
detail="No esta autorizado para realizar esta accion")
item_query.delete(synchronize_session=False)
db.commit()
return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.post("/users/")
def create_user(payload: UserPayload, db:Session=Depends(get_db)):
# TODO: Implement this.
# 1. Validate the username has no uppercase letter, @ sign, nor
# punctuations.
# 2. Hash the password and store the user in the data store.
user = User(
name = payload.name,
username = payload.username,
email = payload.email,
password = password_hash(payload.password)
)
db.add(user)
db.commit()
db.refresh(user)
return user
# TODO: Document this endpoint
@router.get("/users/me")
def get_current_user():
user = get_current_user
return user
| [
"fastapi.HTTPException",
"fastapi.APIRouter",
"fastapi.Response",
"fastapi.Depends"
] | [((400, 411), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (409, 411), False, 'from fastapi import APIRouter, Response\n'), ((493, 508), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (500, 508), False, 'from fastapi import Depends, HTTPException, status\n'), ((741, 756), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (748, 756), False, 'from fastapi import Depends, HTTPException, status\n'), ((1251, 1266), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (1258, 1266), False, 'from fastapi import Depends, HTTPException, status\n'), ((1286, 1311), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (1293, 1311), False, 'from fastapi import Depends, HTTPException, status\n'), ((1755, 1770), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (1762, 1770), False, 'from fastapi import Depends, HTTPException, status\n'), ((1808, 1833), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (1815, 1833), False, 'from fastapi import Depends, HTTPException, status\n'), ((2997, 3012), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (3004, 3012), False, 'from fastapi import Depends, HTTPException, status\n'), ((3033, 3058), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (3040, 3058), False, 'from fastapi import Depends, HTTPException, status\n'), ((3778, 3826), 'fastapi.Response', 'Response', ([], {'status_code': 'status.HTTP_204_NO_CONTENT'}), '(status_code=status.HTTP_204_NO_CONTENT)\n', (3786, 3826), False, 'from fastapi import APIRouter, Response\n'), ((3902, 3917), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (3909, 3917), False, 'from fastapi import Depends, HTTPException, status\n'), ((925, 1020), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_404_NOT_FOUND', 'detail': 'f"""Item con id={id} no existe"""'}), "(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Item con id={id} no existe')\n", (938, 1020), False, 'from fastapi import Depends, HTTPException, status\n'), ((2469, 2548), 'fastapi.HTTPException', 'HTTPException', (['status.HTTP_404_NOT_FOUND'], {'detail': 'f"""post {id} no fue encontrado"""'}), "(status.HTTP_404_NOT_FOUND, detail=f'post {id} no fue encontrado')\n", (2482, 2548), False, 'from fastapi import Depends, HTTPException, status\n'), ((2639, 2751), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_403_FORBIDDEN', 'detail': '"""No esta autorizado para realizar esta accion"""'}), "(status_code=status.HTTP_403_FORBIDDEN, detail=\n 'No esta autorizado para realizar esta accion')\n", (2652, 2751), False, 'from fastapi import Depends, HTTPException, status\n'), ((3402, 3481), 'fastapi.HTTPException', 'HTTPException', (['status.HTTP_404_NOT_FOUND'], {'detail': 'f"""Item {id} no fue encontrado"""'}), "(status.HTTP_404_NOT_FOUND, detail=f'Item {id} no fue encontrado')\n", (3415, 3481), False, 'from fastapi import Depends, HTTPException, status\n'), ((3565, 3677), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_403_FORBIDDEN', 'detail': '"""No esta autorizado para realizar esta accion"""'}), "(status_code=status.HTTP_403_FORBIDDEN, detail=\n 'No esta autorizado para realizar esta accion')\n", (3578, 3677), False, 'from fastapi import Depends, HTTPException, status\n')] |
from setuptools import setup
setup(
name='pytransmute',
version='0.1.0',
packages=['pytransmute', 'pytransmute.plugin'],
package_data={"pytransmute": ["py.typed"]},
url='https://github.com/leeshangqian/pytransmute',
license='Apache-2.0 License',
author='<NAME>',
author_email='<EMAIL>',
description='Safer dataclass conversions',
python_requires="!=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*, !=3.7.*, !=3.8.*,",
install_requires=[
"mypy"
]
)
| [
"setuptools.setup"
] | [((30, 484), 'setuptools.setup', 'setup', ([], {'name': '"""pytransmute"""', 'version': '"""0.1.0"""', 'packages': "['pytransmute', 'pytransmute.plugin']", 'package_data': "{'pytransmute': ['py.typed']}", 'url': '"""https://github.com/leeshangqian/pytransmute"""', 'license': '"""Apache-2.0 License"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'description': '"""Safer dataclass conversions"""', 'python_requires': '"""!=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*, !=3.7.*, !=3.8.*,"""', 'install_requires': "['mypy']"}), "(name='pytransmute', version='0.1.0', packages=['pytransmute',\n 'pytransmute.plugin'], package_data={'pytransmute': ['py.typed']}, url=\n 'https://github.com/leeshangqian/pytransmute', license=\n 'Apache-2.0 License', author='<NAME>', author_email='<EMAIL>',\n description='Safer dataclass conversions', python_requires=\n '!=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*, !=3.7.*, !=3.8.*,'\n , install_requires=['mypy'])\n", (35, 484), False, 'from setuptools import setup\n')] |
from torch.autograd import Variable
import torch.nn.functional as F
import scripts.utils as utils
import torch.nn as nn
import numpy as np
import torch
class CrossEntropy2d(nn.Module):
def __init__(self, size_average=True, ignore_label=255):
super(CrossEntropy2d, self).__init__()
self.size_average = size_average
self.ignore_label = ignore_label
def forward(self, predict, target, weight=None):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
weight (Tensor, optional): a manual rescaling weight given to each class.
If given, has to be a Tensor of size "nclasses"
"""
assert not target.requires_grad
assert predict.dim() == 4
assert target.dim() == 3
assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0))
assert predict.size(2) == target.size(1), "{0} vs {1} ".format(predict.size(2), target.size(1))
assert predict.size(3) == target.size(2), "{0} vs {1} ".format(predict.size(3), target.size(3))
n, c, h, w = predict.size()
target_mask = (target >= 0) * (target != self.ignore_label)
target = target[target_mask]
predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
loss = F.cross_entropy(predict, target, weight=weight, size_average=self.size_average)
return loss
def cross_entropy2d(input, target, weight=None, size_average=True):
# 1. input: (n, c, h, w), target: (n, h, w)
n, c, h, w = input.size()
# 2. log_p: (n, c, h, w)
log_p = F.log_softmax(input, dim=1)
# 3. log_p: (n*h*w, c) - contiguous() required if transpose() is used before view().
log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
log_p = log_p[target.view(n * h * w, 1).repeat(1, c) >= 0]
log_p = log_p.view(-1, c)
# 4. target: (n*h*w,)
mask = target >= 0
target = target[mask]
loss = F.nll_loss(log_p, target, ignore_index=250, weight=weight, size_average=False)
if size_average:
loss /= mask.data.sum()
# loss /= mask.sum().data[0]
return loss
def bootstrapped_cross_entropy2d(input, target, K, weight=None, size_average=False):
"""A categorical cross entropy loss for 4D tensors.
We assume the following layout: (batch, classes, height, width)
Args:
input: The outputs.
target: The predictions.
K: The number of pixels to select in the bootstrapping process.
The total number of pixels is determined as 512 * multiplier.
Returns:
The pixel-bootstrapped cross entropy loss.
"""
batch_size = input.size()[0]
def _bootstrap_xentropy_single(input, target, K, weight=None, size_average=False):
n, c, h, w = input.size()
# 1. The log softmax. log_p: (n, c, h, w)
log_p = F.log_softmax(input, dim=1)
# 2. log_p: (n*h*w, c) - contiguous() required if transpose() is used before view().
log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
log_p = log_p[target.view(n * h * w, 1).repeat(1, c) >= 0]
log_p = log_p.view(-1, c)
# 3. target: (n*h*w,)
mask = target >= 0
target = target[mask]
loss = F.nll_loss(log_p, target, weight=weight, ignore_index=250,
reduce=False, size_average=size_average)
# For each element in the batch, collect the top K worst predictions
topk_loss, _ = loss.topk(K)
reduced_topk_loss = topk_loss.sum() / K
return reduced_topk_loss
loss = 0.0
# Bootstrap from each image not entire batch
for i in range(batch_size):
loss += _bootstrap_xentropy_single(input=torch.unsqueeze(input[i], 0),
target=torch.unsqueeze(target[i], 0),
K=K,
weight=weight,
size_average=size_average)
return loss / float(batch_size)
class FocalLoss2D(nn.Module):
"""
Focal Loss, which is proposed in:
"Focal Loss for Dense Object Detection (https://arxiv.org/abs/1708.02002v2)"
"""
def __init__(self, num_classes=19, ignore_label=250, alpha=0.25, gamma=2, size_average=True):
"""
Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class])
:param num_classes: (int) num of the classes
:param ignore_label: (int) ignore label
:param alpha: (1D Tensor or Variable) the scalar factor
:param gamma: (float) gamma > 0;
reduces the relative loss for well-classified examples (probabilities > .5),
putting more focus on hard, mis-classified examples
:param size_average: (bool): By default, the losses are averaged over observations for each mini-batch.
If the size_average is set to False, the losses are
instead summed for each mini-batch.
"""
super(FocalLoss2D, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.num_classes = num_classes
self.ignore_label = ignore_label
self.size_average = size_average
self.one_hot = Variable(torch.eye(self.num_classes))
def forward(self, cls_preds, cls_targets):
"""
:param cls_preds: (n, c, h, w)
:param cls_targets: (n, h, w)
:return:
"""
assert not cls_targets.requires_grad
assert cls_targets.dim() == 3
assert cls_preds.size(0) == cls_targets.size(0), "{0} vs {1} ".format(cls_preds.size(0), cls_targets.size(0))
assert cls_preds.size(2) == cls_targets.size(1), "{0} vs {1} ".format(cls_preds.size(2), cls_targets.size(1))
assert cls_preds.size(3) == cls_targets.size(2), "{0} vs {1} ".format(cls_preds.size(3), cls_targets.size(3))
if cls_preds.is_cuda:
self.one_hot = self.one_hot.cuda()
n, c, h, w = cls_preds.size()
# +++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 1. target reshape and one-hot encode
# +++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 1.1. target: (n*h*w,)
cls_targets = cls_targets.view(n * h * w, 1)
target_mask = (cls_targets >= 0) * (cls_targets != self.ignore_label)
cls_targets = cls_targets[target_mask]
cls_targets = self.one_hot.index_select(dim=0, index=cls_targets)
# +++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 2. compute focal loss for multi-classification
# +++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 2.1. The softmax. prob: (n, c, h, w)
prob = F.softmax(cls_preds, dim=1)
# 2.2. prob: (n*h*w, c) - contiguous() required if transpose() is used before view().
prob = prob.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
prob = prob[target_mask.repeat(1, c)]
prob = prob.view(-1, c) # (n*h*w, c)
probs = torch.clamp((prob * cls_targets).sum(1).view(-1, 1), min=1e-8, max=1.0)
batch_loss = -self.alpha * (torch.pow((1 - probs), self.gamma)) * probs.log()
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
class SemanticEncodingLoss(nn.Module):
def __init__(self, num_classes=19, ignore_label=250, alpha=0.25):
super(SemanticEncodingLoss, self).__init__()
self.alpha = alpha
self.num_classes = num_classes
self.ignore_label = ignore_label
def unique_encode(self, cls_targets):
batch_size, _, _ = cls_targets.size()
target_mask = (cls_targets >= 0) * (cls_targets != self.ignore_label)
cls_targets = [cls_targets[idx].masked_select(target_mask[idx]) for idx in np.arange(batch_size)]
# unique_cls = [np.unique(label.numpy(), return_counts=True) for label in cls_targets]
unique_cls = [np.unique(label.numpy()) for label in cls_targets]
encode = np.zeros((batch_size, self.num_classes), dtype=np.uint8)
for idx in np.arange(batch_size):
np.put(encode[idx], unique_cls[idx], 1)
return torch.from_numpy(encode).float()
def forward(self, predicts, enc_cls_target, size_average=True):
se_loss = F.binary_cross_entropy_with_logits(predicts, enc_cls_target, weight=None,
size_average=size_average)
return self.alpha * se_loss
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# Lovasz-Softmax
# <NAME> 2018 ESAT-PSI KU Leuven
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True):
"""
IoU for foreground class
binary: 1 foreground, 0 background
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
intersection = ((label == 1) & (pred == 1)).sum()
union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
if not union:
iou = EMPTY
else:
iou = float(intersection) / union
ious.append(iou)
iou = utils.mean(ious) # mean accross images if per_image
return 100 * iou
def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
"""
Array of IoU for each (non ignored) class
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
iou = []
for i in range(C):
if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes)
intersection = ((label == i) & (pred == i)).sum()
union = ((label == i) | ((pred == i) & (label != ignore))).sum()
if not union:
iou.append(EMPTY)
else:
iou.append(float(intersection) / union)
ious.append(iou)
ious = map(utils.mean, zip(*ious)) # mean accross images if per_image
return 100 * np.array(ious)
def lovasz_softmax(probas, labels, only_present=False, per_image=False, ignore=None):
"""
Multi-class Lovasz-Softmax loss
probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1)
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
only_present: average only on classes present in ground truth
per_image: compute the loss per image instead of per batch
ignore: void class labels
"""
if per_image:
loss = utils.mean(lovasz_softmax_flat(*flatten_probas(prob, lab, ignore), only_present=only_present)
for prob, lab in zip(probas, labels))
else:
loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), only_present=only_present)
return loss
def lovasz_softmax_flat(probas, labels, only_present=False):
"""
Multi-class Lovasz-Softmax loss
probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
labels: [P] Tensor, ground truth labels (between 0 and C - 1)
only_present: average only on classes present in ground truth
"""
C = probas.size(1)
losses = []
for c in range(C):
fg = (labels == c).float() # foreground for class c
if only_present and fg.sum() == 0:
continue
errors = (fg - probas[:, c]).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
losses.append(torch.dot(errors_sorted, lovasz_grad(fg_sorted)))
return utils.mean(losses)
def flatten_probas(scores, labels, ignore=None):
"""
Flattens predictions in the batch
"""
B, C, H, W = scores.size()
scores = scores.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = (labels != ignore)
vscores = scores[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vscores, vlabels
if __name__ == "__main__":
from torch.autograd import Variable
while True:
dummy_in = Variable(torch.randn(2, 3, 32, 32), requires_grad=True)
dummy_gt = Variable(torch.LongTensor(2, 32, 32).random_(0, 3))
dummy_in = F.softmax(dummy_in, dim=1)
loss = lovasz_softmax(dummy_in, dummy_gt, ignore=255)
print(loss.data[0])
| [
"torch.sort",
"torch.nn.functional.nll_loss",
"numpy.put",
"torch.eye",
"torch.unsqueeze",
"torch.LongTensor",
"scripts.utils.mean",
"torch.pow",
"torch.from_numpy",
"torch.randn",
"numpy.array",
"numpy.zeros",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.log_softmax",
"torc... | [((1750, 1777), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['input'], {'dim': '(1)'}), '(input, dim=1)\n', (1763, 1777), True, 'import torch.nn.functional as F\n'), ((2124, 2202), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['log_p', 'target'], {'ignore_index': '(250)', 'weight': 'weight', 'size_average': '(False)'}), '(log_p, target, ignore_index=250, weight=weight, size_average=False)\n', (2134, 2202), True, 'import torch.nn.functional as F\n'), ((10048, 10064), 'scripts.utils.mean', 'utils.mean', (['ious'], {}), '(ious)\n', (10058, 10064), True, 'import scripts.utils as utils\n'), ((12524, 12542), 'scripts.utils.mean', 'utils.mean', (['losses'], {}), '(losses)\n', (12534, 12542), True, 'import scripts.utils as utils\n'), ((1460, 1539), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['predict', 'target'], {'weight': 'weight', 'size_average': 'self.size_average'}), '(predict, target, weight=weight, size_average=self.size_average)\n', (1475, 1539), True, 'import torch.nn.functional as F\n'), ((3062, 3089), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['input'], {'dim': '(1)'}), '(input, dim=1)\n', (3075, 3089), True, 'import torch.nn.functional as F\n'), ((3468, 3571), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['log_p', 'target'], {'weight': 'weight', 'ignore_index': '(250)', 'reduce': '(False)', 'size_average': 'size_average'}), '(log_p, target, weight=weight, ignore_index=250, reduce=False,\n size_average=size_average)\n', (3478, 3571), True, 'import torch.nn.functional as F\n'), ((7059, 7086), 'torch.nn.functional.softmax', 'F.softmax', (['cls_preds'], {'dim': '(1)'}), '(cls_preds, dim=1)\n', (7068, 7086), True, 'import torch.nn.functional as F\n'), ((8396, 8452), 'numpy.zeros', 'np.zeros', (['(batch_size, self.num_classes)'], {'dtype': 'np.uint8'}), '((batch_size, self.num_classes), dtype=np.uint8)\n', (8404, 8452), True, 'import numpy as np\n'), ((8473, 8494), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (8482, 8494), True, 'import numpy as np\n'), ((8684, 8788), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['predicts', 'enc_cls_target'], {'weight': 'None', 'size_average': 'size_average'}), '(predicts, enc_cls_target, weight=None,\n size_average=size_average)\n', (8718, 8788), True, 'import torch.nn.functional as F\n'), ((10942, 10956), 'numpy.array', 'np.array', (['ious'], {}), '(ious)\n', (10950, 10956), True, 'import numpy as np\n'), ((12348, 12386), 'torch.sort', 'torch.sort', (['errors', '(0)'], {'descending': '(True)'}), '(errors, 0, descending=True)\n', (12358, 12386), False, 'import torch\n'), ((13236, 13262), 'torch.nn.functional.softmax', 'F.softmax', (['dummy_in'], {'dim': '(1)'}), '(dummy_in, dim=1)\n', (13245, 13262), True, 'import torch.nn.functional as F\n'), ((5597, 5624), 'torch.eye', 'torch.eye', (['self.num_classes'], {}), '(self.num_classes)\n', (5606, 5624), False, 'import torch\n'), ((8508, 8547), 'numpy.put', 'np.put', (['encode[idx]', 'unique_cls[idx]', '(1)'], {}), '(encode[idx], unique_cls[idx], 1)\n', (8514, 8547), True, 'import numpy as np\n'), ((13098, 13123), 'torch.randn', 'torch.randn', (['(2)', '(3)', '(32)', '(32)'], {}), '(2, 3, 32, 32)\n', (13109, 13123), False, 'import torch\n'), ((3936, 3964), 'torch.unsqueeze', 'torch.unsqueeze', (['input[i]', '(0)'], {}), '(input[i], 0)\n', (3951, 3964), False, 'import torch\n'), ((4016, 4045), 'torch.unsqueeze', 'torch.unsqueeze', (['target[i]', '(0)'], {}), '(target[i], 0)\n', (4031, 4045), False, 'import torch\n'), ((7475, 7507), 'torch.pow', 'torch.pow', (['(1 - probs)', 'self.gamma'], {}), '(1 - probs, self.gamma)\n', (7484, 7507), False, 'import torch\n'), ((8186, 8207), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (8195, 8207), True, 'import numpy as np\n'), ((8564, 8588), 'torch.from_numpy', 'torch.from_numpy', (['encode'], {}), '(encode)\n', (8580, 8588), False, 'import torch\n'), ((13173, 13200), 'torch.LongTensor', 'torch.LongTensor', (['(2)', '(32)', '(32)'], {}), '(2, 32, 32)\n', (13189, 13200), False, 'import torch\n')] |
# Generated by Django 2.0.2 on 2018-05-01 06:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('upload', '0004_auto_20180430_2344'),
]
operations = [
migrations.RemoveField(
model_name='logfile',
name='id',
),
migrations.AlterField(
model_name='logfile',
name='log_name',
field=models.CharField(blank=True, max_length=255, primary_key=True, serialize=False, unique=True),
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.CharField"
] | [((234, 289), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""logfile"""', 'name': '"""id"""'}), "(model_name='logfile', name='id')\n", (256, 289), False, 'from django.db import migrations, models\n'), ((438, 535), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'primary_key': '(True)', 'serialize': '(False)', 'unique': '(True)'}), '(blank=True, max_length=255, primary_key=True, serialize=\n False, unique=True)\n', (454, 535), False, 'from django.db import migrations, models\n')] |
from google.appengine.ext import db
from google.appengine.api import urlfetch
class PegasusFiles(db.Model):
name = db.StringProperty()
file = db.BlobProperty()
added = db.DateTimeProperty(auto_now_add=True)
| [
"google.appengine.ext.db.BlobProperty",
"google.appengine.ext.db.DateTimeProperty",
"google.appengine.ext.db.StringProperty"
] | [((117, 136), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (134, 136), False, 'from google.appengine.ext import db\n'), ((145, 162), 'google.appengine.ext.db.BlobProperty', 'db.BlobProperty', ([], {}), '()\n', (160, 162), False, 'from google.appengine.ext import db\n'), ((172, 210), 'google.appengine.ext.db.DateTimeProperty', 'db.DateTimeProperty', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (191, 210), False, 'from google.appengine.ext import db\n')] |
#
# Copyright 2022 European Centre for Medium-Range Weather Forecasts (ECMWF)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
#
from copy import deepcopy
import pytest
import polytope_server.common.request as request
from polytope_server.common.user import User
class Test:
def setup_method(self, method):
self.user = User("joebloggs", "realm1")
self.user.attributes["extra_info"] = "realm1_specific_id"
def test_request(self):
r = request.Request(user=self.user, verb=request.Verb.RETRIEVE)
assert r.verb == request.Verb.RETRIEVE
assert r.verb != "retrieve" # enum should not evaluate directly
assert r.user == self.user
assert r.user.username == "joebloggs"
assert r.user.realm == "realm1"
assert r.user.attributes["extra_info"] == "realm1_specific_id"
def test_request_equality(self):
r1 = request.Request(user=self.user, verb=request.Verb.RETRIEVE)
r2 = request.Request(user=self.user, verb=request.Verb.RETRIEVE)
assert r1 != r2
r2.id = r1.id
r2.timestamp = r1.timestamp
assert r1 == r2
def test_request_cant_add_attribute(self):
r1 = request.Request()
with pytest.raises(AttributeError):
r1.new_attr = "test"
def test_request_serialization(self):
r1 = request.Request(user=self.user, verb=request.Verb.RETRIEVE)
d = r1.serialize()
assert d["verb"] == "retrieve"
assert d["user"] == self.user.serialize()
r2 = request.Request(from_dict=d)
r3 = request.Request()
r3.deserialize(d)
assert r2 == r1
assert r3 == r1
assert r2.verb == request.Verb.RETRIEVE
assert r3.status == r1.status
assert r2.user == self.user
def test_request_copy(self):
r1 = request.Request(user=self.user, verb=request.Verb.RETRIEVE)
r2 = deepcopy(r1)
assert r1 == r2
| [
"polytope_server.common.request.Request",
"polytope_server.common.user.User",
"pytest.raises",
"copy.deepcopy"
] | [((1033, 1060), 'polytope_server.common.user.User', 'User', (['"""joebloggs"""', '"""realm1"""'], {}), "('joebloggs', 'realm1')\n", (1037, 1060), False, 'from polytope_server.common.user import User\n'), ((1168, 1227), 'polytope_server.common.request.Request', 'request.Request', ([], {'user': 'self.user', 'verb': 'request.Verb.RETRIEVE'}), '(user=self.user, verb=request.Verb.RETRIEVE)\n', (1183, 1227), True, 'import polytope_server.common.request as request\n'), ((1591, 1650), 'polytope_server.common.request.Request', 'request.Request', ([], {'user': 'self.user', 'verb': 'request.Verb.RETRIEVE'}), '(user=self.user, verb=request.Verb.RETRIEVE)\n', (1606, 1650), True, 'import polytope_server.common.request as request\n'), ((1664, 1723), 'polytope_server.common.request.Request', 'request.Request', ([], {'user': 'self.user', 'verb': 'request.Verb.RETRIEVE'}), '(user=self.user, verb=request.Verb.RETRIEVE)\n', (1679, 1723), True, 'import polytope_server.common.request as request\n'), ((1891, 1908), 'polytope_server.common.request.Request', 'request.Request', ([], {}), '()\n', (1906, 1908), True, 'import polytope_server.common.request as request\n'), ((2042, 2101), 'polytope_server.common.request.Request', 'request.Request', ([], {'user': 'self.user', 'verb': 'request.Verb.RETRIEVE'}), '(user=self.user, verb=request.Verb.RETRIEVE)\n', (2057, 2101), True, 'import polytope_server.common.request as request\n'), ((2231, 2259), 'polytope_server.common.request.Request', 'request.Request', ([], {'from_dict': 'd'}), '(from_dict=d)\n', (2246, 2259), True, 'import polytope_server.common.request as request\n'), ((2273, 2290), 'polytope_server.common.request.Request', 'request.Request', ([], {}), '()\n', (2288, 2290), True, 'import polytope_server.common.request as request\n'), ((2534, 2593), 'polytope_server.common.request.Request', 'request.Request', ([], {'user': 'self.user', 'verb': 'request.Verb.RETRIEVE'}), '(user=self.user, verb=request.Verb.RETRIEVE)\n', (2549, 2593), True, 'import polytope_server.common.request as request\n'), ((2607, 2619), 'copy.deepcopy', 'deepcopy', (['r1'], {}), '(r1)\n', (2615, 2619), False, 'from copy import deepcopy\n'), ((1922, 1951), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1935, 1951), False, 'import pytest\n')] |
#!/usr/bin/env python
import yaml
from pprint import pprint as pp
from napalm import get_network_driver
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Read YAML file
with open("my_devices.yml", 'r') as stream:
devices = yaml.load(stream)
driver = get_network_driver('ios')
pynet_rtr2_conn = driver(devices['pynet-rtr2']['hostname'], devices['pynet-rtr2']['username'], devices['pynet-rtr2']['password'], optional_args = devices['pynet-rtr2']['optional_args'])
new_route = "ip route 192.168.3.11 255.255.255.255 10.220.88.1\n"
pynet_rtr2_conn.open()
pynet_rtr2_conn.load_merge_candidate(config=new_route)
pp(pynet_rtr2_conn.compare_config())
input("Hit any key to continue!")
pynet_rtr2_conn.discard_config()
pp(pynet_rtr2_conn.compare_config())
input("Hit any key to continue!")
pynet_rtr2_conn.load_merge_candidate(config=new_route)
pynet_rtr2_conn.commit_config()
pynet_rtr2_conn.close()
| [
"yaml.load",
"requests.packages.urllib3.disable_warnings",
"napalm.get_network_driver"
] | [((194, 260), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', (['InsecureRequestWarning'], {}), '(InsecureRequestWarning)\n', (236, 260), False, 'import requests\n'), ((365, 390), 'napalm.get_network_driver', 'get_network_driver', (['"""ios"""'], {}), "('ios')\n", (383, 390), False, 'from napalm import get_network_driver\n'), ((337, 354), 'yaml.load', 'yaml.load', (['stream'], {}), '(stream)\n', (346, 354), False, 'import yaml\n')] |
import responses
from requests.exceptions import HTTPError
from infoblox import infoblox
from . import testcasefixture
class TestGetHost(testcasefixture.TestCaseWithFixture):
fixture_name = 'host_get'
@classmethod
def setUpClass(cls):
super(TestGetHost, cls).setUpClass()
with responses.RequestsMock() as res:
res.add(responses.GET,
'https://10.10.10.10/wapi/v1.6/record:host',
body=cls.body,
status=200)
cls.ip = cls.iba_ipa.get_host('host.domain.com')
def test_get_host(self):
response_test_dict = {'view': 'default',
'name': 'host.domain.com',
'ipv4addrs': [{'host':
'host.domain.com',
'ipv4addr': '192.168.40.10',
'_ref': 'record:host_ipv4addr/ZG5zLmhvc3RfYWRkcmVzcyQuX2RlZmF1bHQuY29tLmVxdWlmYXgudXMubGFicy5jaWEuYWFhLXRlc3Rob3N0LjEyLjYuNC4yLg:192.168.40.10/host.domain.com/default',
'configure_for_dhcp': False}],
'_ref': 'record:host/ZG5zLmhvc3QkLl9kZWZhdWx0LmNvbS5lcXVpZmF4LnVzLmxhYnMuY2lhLmFhYS10ZXN0aG9zdA:host.domain.com/default'}
self.assertDictEqual(self.ip, response_test_dict)
@responses.activate
def test_get_host_nohostfound(self):
responses.add(responses.GET,
'https://10.10.10.10/wapi/v1.6/record:host',
body='[]',
status=200)
with self.assertRaises(infoblox.InfobloxNotFoundException):
ip = self.iba_ipa.get_host('host.domain.com')
@responses.activate
def test_get_host_servererror(self):
responses.add(responses.GET,
'https://10.10.10.10/wapi/v1.6/record:host',
body='[]',
status=500)
with self.assertRaises(HTTPError):
ip = self.iba_ipa.get_host('host.domain.com')
| [
"responses.add",
"responses.RequestsMock"
] | [((1470, 1570), 'responses.add', 'responses.add', (['responses.GET', '"""https://10.10.10.10/wapi/v1.6/record:host"""'], {'body': '"""[]"""', 'status': '(200)'}), "(responses.GET, 'https://10.10.10.10/wapi/v1.6/record:host',\n body='[]', status=200)\n", (1483, 1570), False, 'import responses\n'), ((1833, 1933), 'responses.add', 'responses.add', (['responses.GET', '"""https://10.10.10.10/wapi/v1.6/record:host"""'], {'body': '"""[]"""', 'status': '(500)'}), "(responses.GET, 'https://10.10.10.10/wapi/v1.6/record:host',\n body='[]', status=500)\n", (1846, 1933), False, 'import responses\n'), ((308, 332), 'responses.RequestsMock', 'responses.RequestsMock', ([], {}), '()\n', (330, 332), False, 'import responses\n')] |